Compare commits

368 Commits

| Author | SHA1 | Date |
|---|---|---|
| | e0a03d1f9c | |
| | d07be523f5 | |
| | e4cdde96b3 | |
| | 252a0ccb80 | |
| | dad0c9d686 | |
| | e173834af8 | |
| | 63e3a97815 | |
| | 61796a8999 | |
| | 7d9c059a35 | |
| | 52572ca1b3 | |
| | 220bbc44c2 | |
| | f378636d90 | |
| | 98f33430a2 | |
| | 6672f672d9 | |
| | 3002e5976d | |
| | 1e8ff7d21d | |
| | dae6c19995 | |
| | 989adf5ee5 | |
| | 09d406eec8 | |
| | d1b62a9a07 | |
| | 29c8fac3f7 | |
| | 8c33d40a7b | |
| | f406bf3fa9 | |
| | 54bec22a6e | |
| | e1b2c93a23 | |
| | e529ff52a0 | |
| | fb7e76d1cf | |
| | 96047b3150 | |
| | 2545defeac | |
| | 2d97ad38ed | |
| | f6c628f029 | |
| | 3f743e3e4c | |
| | f2dbd64bde | |
| | 7bce659e18 | |
| | cb9379065f | |
| | 4261778dbd | |
| | ba88a6e4e4 | |
| | e0407a7bf7 | |
| | d575984dfc | |
| | 9ea1e82d68 | |
| | 5cd2cdf33f | |
| | a5a6f6fec3 | |
| | 88544e8ceb | |
| | f183eaa3ad | |
| | d773d7775a | |
| | f15f4cefd7 | |
| | 0ec75a04e5 | |
| | 34fb994d93 | |
| | acafd1814e | |
| | bb01956d67 | |
| | bc0c49b83e | |
| | 0008a87cb1 | |
| | 2aa6592338 | |
| | 2b14d98086 | |
| | a05f86ec10 | |
| | 9bdb254b98 | |
| | 358d1f6e01 | |
| | ea28034f5d | |
| | 6452b31599 | |
| | a14969253a | |
| | fab9a7be76 | |
| | 3a67865963 | |
| | 3fb754712c | |
| | e5294f407a | |
| | ed1ad2f5eb | |
| | e780c3daaf | |
| | 7f954ca502 | |
| | 8e9e57ed0c | |
| | 1bd6372cd3 | |
| | 40ffa99dfa | |
| | ff79f6b35a | |
| | e2a83d72da | |
| | 6c3985713b | |
| | 7d97cc8d87 | |
| | a56a9e65c6 | |
| | 428b629eb2 | |
| | 194d12345d | |
| | b3d8276d2d | |
| | c2eb668617 | |
| | c9c223ba00 | |
| | 9d0ff6436e | |
| | 02b7b125b5 | |
| | 5643668308 | |
| | 2d18e7f3ef | |
| | f1b5830182 | |
| | c588316555 | |
| | afd1f61944 | |
| | 16a9c5ea9e | |
| | 83fb31a76d | |
| | fc5b32877a | |
| | 10e023c4fa | |
| | 27a3a59428 | |
| | 8ab849cddc | |
| | 5191b00155 | |
| | 1131e7a1a4 | |
| | 26becbcd2a | |
| | 366cdd3548 | |
| | e3b08b3ad4 | |
| | fa16440659 | |
| | c36fd16aaa | |
| | 8c5897632a | |
| | b34fce9c54 | |
| | 742f9aa879 | |
| | 79041d92ee | |
| | 82cebc0e05 | |
| | 09abca6802 | |
| | 43d64829e6 | |
| | 4a479fd3e6 | |
| | 4f41717d01 | |
| | 6896dcbf5f | |
| | 14404170b9 | |
| | e9e42beed2 | |
| | abd6decd55 | |
| | 0385c824f1 | |
| | c4e764aa69 | |
| | 9d02e38d3f | |
| | 30cf47c6f0 | |
| | b45cd17d29 | |
| | 26b6d70c72 | |
| | 32919db4fb | |
| | 56f44c26f0 | |
| | fe87a40de6 | |
| | 0f6e309b97 | |
| | 96e13c9897 | |
| | e72c0a0466 | |
| | dfddefa13a | |
| | ce94955b3c | |
| | dde95268cc | |
| | d20ac551a8 | |
| | 352b0969e2 | |
| | b479b42b26 | |
| | 36cab9c408 | |
| | 34592d04fb | |
| | 544accc895 | |
| | f41622ecb4 | |
| | fc8eb4c1f9 | |
| | 02bae9f013 | |
| | 5cb2a1c3f0 | |
| | a221c9bd76 | |
| | b2583c2b62 | |
| | bd553941ae | |
| | e0aa76d38a | |
| | a014b9614e | |
| | 31c21d2f69 | |
| | 3cd1c8653b | |
| | 194485cfba | |
| | 81cfe39113 | |
| | ef0c503d37 | |
| | 1103aec1df | |
| | b40ab81d1f | |
| | 314f055c29 | |
| | 2c566744c4 | |
| | adad1ba5d8 | |
| | a80a7131d1 | |
| | 3ab63abbd4 | |
| | d2c76782e0 | |
| | 4dc8b4d7d0 | |
| | 9ff0467566 | |
| | 4407b38b28 | |
| | 8caaf260a6 | |
| | e1f51bbd1f | |
| | 4b7c149306 | |
| | e8919d6522 | |
| | b017785fa5 | |
| | 01507eb1f8 | |
| | 938ff93710 | |
| | 0c88d539f8 | |
| | e39a992bd1 | |
| | 72a12f61ef | |
| | 30ae080e9d | |
| | dca463b728 | |
| | 25b462cab9 | |
| | 7c6a8afa7e | |
| | b052525f9b | |
| | 90c7bfb9be | |
| | 7bdd348e58 | |
| | af3d003658 | |
| | c00beff5e0 | |
| | 57a43142ba | |
| | 99905118a8 | |
| | dcf560204c | |
| | 5b0e2eb041 | |
| | d461e077a5 | |
| | 9a884b7b97 | |
| | 9abe0bfb7f | |
| | 13682b48e9 | |
| | f25e6e0c25 | |
| | 80239a8bb1 | |
| | 26bbc1c242 | |
| | efe259a27e | |
| | 49f11e12d5 | |
| | d130fae519 | |
| | dde996bf99 | |
| | cad2958fd7 | |
| | 29d61d73b1 | |
| | 9a6a710998 | |
| | daaef403d1 | |
| | 207f5a138a | |
| | e9c8a9aaa6 | |
| | 7b7d8b8794 | |
| | 90d6b563fe | |
| | dce2f820e9 | |
| | 4aab3f868f | |
| | 48609236da | |
| | d8fe695779 | |
| | 0f42e06651 | |
| | 230c4c6ad9 | |
| | f4489c9558 | |
| | 0e5d9fe2a7 | |
| | 9ae2aaea50 | |
| | 2513314912 | |
| | e727cbf0be | |
| | bcc25353cf | |
| | 6a10263f16 | |
| | bcc6429c01 | |
| | bdb219435e | |
| | a7338ae8ac | |
| | 6776c2c04f | |
| | facd3dbc6e | |
| | 8796c3b7d3 | |
| | 7430f3064f | |
| | eac281b06c | |
| | b5210f4eae | |
| | 10379d50be | |
| | cd874cf8e6 | |
| | 82a3e469c6 | |
| | f859fed03d | |
| | 991e6fa35b | |
| | 09dca51066 | |
| | 40de74d0eb | |
| | e2811c2ede | |
| | 25d14b716a | |
| | f1de93dec3 | |
| | 738d68de85 | |
| | 00ecce5c8b | |
| | 6ba07e9948 | |
| | 125bea15d1 | |
| | 70e3cc282b | |
| | 242df26b44 | |
| | 46c2dba20e | |
| | 3caa6a5a57 | |
| | bf08665e2e | |
| | c4f5f4dbd3 | |
| | 29df24252a | |
| | b920c1d5ad | |
| | 2b9ee7d5b9 | |
| | f800cacada | |
| | 5227eac5b0 | |
| | bb40f8f5e2 | |
| | ad8bf22086 | |
| | 7f8804296d | |
| | f67e75b5dc | |
| | 35e63f35b0 | |
| | 3bfb7a2537 | |
| | 4a1e7a6fb7 | |
| | ff1e982205 | |
| | bb116e6ba3 | |
| | ebe356bf1c | |
| | 30099413ec | |
| | 186e0ff067 | |
| | 2642ad9f55 | |
| | 95ddd2227b | |
| | 3faebed6fa | |
| | 3aee1fa5b6 | |
| | 89a9c84ebb | |
| | 0adde39e04 | |
| | 03ae616b19 | |
| | 830c3058ff | |
| | b12c5cbbb2 | |
| | 82c96b5ad8 | |
| | 3e4b957847 | |
| | cbabbe8220 | |
| | 80122a3af3 | |
| | a475755b3a | |
| | 66030e8133 | |
| | 46f8d838b3 | |
| | bc3648d4b4 | |
| | 27e6b4a3ff | |
| | b82860caa7 | |
| | 3d05625136 | |
| | ddd3301bad | |
| | 123981930f | |
| | 3171e2360a | |
| | 3533a850e7 | |
| | 6d56bc9a6d | |
| | 2c5e1d0933 | |
| | b37b83214a | |
| | 6d7ab09788 | |
| | 227cfc1f10 | |
| | 416847d195 | |
| | bd4ad1a1d5 | |
| | 6230de03aa | |
| | 45acc228a6 | |
| | d37fac6dbb | |
| | 7940306a47 | |
| | eabefe83f4 | |
| | eaa79b79b2 | |
| | c761379825 | |
| | ea3309eba7 | |
| | 1c1e252cd1 | |
| | ca2c9d6b9b | |
| | fa6b99d351 | |
| | d79cb6947e | |
| | 5aa4b29bbe | |
| | e4cbd0d6e5 | |
| | 0ede7b5344 | |
| | 5b933be089 | |
| | f2693e98b4 | |
| | c3861e14ce | |
| | daa5a988e2 | |
| | db67b7c31b | |
| | a643a47d41 | |
| | 23af29e882 | |
| | 7d995cd1b8 | |
| | 72a58c0772 | |
| | d525423006 | |
| | 4b476e6aa4 | |
| | 124c78fd44 | |
| | a1ab3300c8 | |
| | 1af235f6b3 | |
| | 82031e41f8 | |
| | 222e7549a7 | |
| | eb2244ece9 | |
| | b967c10029 | |
| | 7ff4cd2acc | |
| | c4149c4d54 | |
| | 8ad2f45964 | |
| | 5df52b0131 | |
| | 596d3e20ae | |
| | 00d5ff6431 | |
| | 437179e9c8 | |
| | 031d3b66c2 | |
| | b76871d870 | |
| | 15ae305007 | |
| | 3c72204ae0 | |
| | ba21499648 | |
| | 7933039ade | |
| | 4015829acc | |
| | 39dc4a6bb3 | |
| | a6a2d8eb8f | |
| | 58556826a8 | |
| | bc2c9a479a | |
| | 9cc22be032 | |
| | 33e1bca651 | |
| | 9841617b7f | |
| | 2897481f64 | |
| | 646c564de5 | |
| | cd6281abef | |
| | 697be8173b | |
| | 1853d8bb7a | |
| | 1779cd7695 | |
| | bb4820727f | |
| | affc7687d3 | |
| | 3569470693 | |
| | 1d1df82093 | |
| | de187e3e9e | |
| | 7754d48381 | |
| | 63169474b3 | |
| | b3f106cb1f | |
| | 9b6ccf0f24 | |
| | 298d66c8de | |
| | 4be1b68d52 | |
| | 92edc13d69 | |
| | c9f015f1c6 | |
| | db6b2ca0b3 | |
| | 3503ec8461 | |
| | ecc5e42d92 | |
| | f87ce262f6 | |
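
The listed range can also be inspected locally with plain git. A minimal sketch, assuming the two endpoints of this compare are available and referred to here by the hypothetical placeholders BASE and HEAD (the page does not name them):

```sh
# List the commits between the two compare endpoints, newest first.
# BASE and HEAD are placeholders; substitute the actual refs being compared.
git log --oneline BASE..HEAD

# Inspect any commit from the table above in full, e.g. the first SHA1 listed.
git show e0a03d1f9c
```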

.gitattributes (vendored): 1 changed line

@@ -1 +0,0 @@
*.pnm -diff -text

.gitignore (vendored): 7 changed lines

@@ -15,7 +15,6 @@
*.pdb
*.so
*.so.*
*.swp
*.ver
*-example
*-test
@@ -37,9 +36,8 @@
/doc/avoptions_format.texi
/doc/doxy/html/
/doc/examples/avio_reading
/doc/examples/decoding_encoding
/doc/examples/avcodec
/doc/examples/demuxing_decoding
/doc/examples/extract_mvs
/doc/examples/filter_audio
/doc/examples/filtering_audio
/doc/examples/filtering_video
@@ -50,7 +48,6 @@
/doc/examples/resampling_audio
/doc/examples/scaling_video
/doc/examples/transcode_aac
/doc/examples/transcoding
/doc/fate.txt
/doc/print_options
/lcov/
@@ -62,7 +59,6 @@
/tests/audiogen
/tests/base64
/tests/data/
/tests/pixfmts.mak
/tests/rotozoom
/tests/tiny_psnr
/tests/tiny_ssim
@@ -84,5 +80,4 @@
/tools/qt-faststart
/tools/trasher
/tools/seek_print
/tools/uncoded_frame
/tools/zmqsend

Changelog: 330 changed lines

@@ -1,328 +1,6 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 2.4.9:
- alac: reject rice_limit 0 if compression is used
- lavf: Reset global flag on deinit
- bink: check vst->index_entries before using it
- mpeg4videodec: only allow a positive length
- alsdec: check sample pointer range in revert_channel_correlation
- avcodec/h264_refs: Do not set reference to things which do not exist
- avcodec/h264: Fail for invalid mixed IDR / non IDR frames in slice threading mode
- h264: avoid unnecessary calls to get_format
- avutil/pca: Check for av_malloc* failures
- alsdec: validate time diff index
- avcodec/alsdec: Use av_mallocz_array() for chan_data to ensure the arrays never contain random data
- alsdec: ensure channel reordering is reversible
- avcodec/atrac3plusdsp: fix on stack alignment
- ac3: validate end in ff_ac3_bit_alloc_calc_mask
- aacpsy: avoid psy_band->threshold becoming NaN
- aasc: return correct buffer size from aasc_decode_frame
- aacdec: consistently use avctx for logging in decode_eld_specific_config
- msrledec: use signed pixel_ptr in msrle_decode_pal4
- swresample/swresample-test: Randomly wipe out channel counts
- swresample: Check channel layouts and channels against each other and print human readable error messages
- swresample: Allow reinitialization without ever setting channel layouts
- swresample: Allow reinitialization without ever setting channel counts
- avcodec/h264: Do not fail with randomly truncated VUIs
- avcodec/h264_ps: Move truncation check from VUI to SPS
- avcodec/h264: Be more tolerant to changing pps id between slices
- avcodec/aacdec: Fix storing state before PCE decode
- avcodec/h264: reset the counts in the correct context
- avcodec/h264_slice: Do not reset mb_aff_frame per slice
- tests: Fix test name for pixfmts tests
- avcodec/h264: finish previous slices before switching to single thread mode
- avcodec/h264: Fix race between slices where one overwrites data from the next
- avformat/utils: avoid discarded streams in av_find_default_stream_index()
- avformat/utils: Ensure that AVFMT_FLAG_CUSTOM_IO is set before use
- avformat/img2dec: do not rewind custom io buffers
- fate: Include branch information in the payload header


version 2.4.8:
- avutil/cpu: add missing check for mmxext to av_force_cpu_flags
- avcodec/msrledec: restructure msrle_decode_pal4() based on the line number instead of the pixel pointer
- avcodec/hevc_ps: Check cropping parameters more correctly
- avcodec/dnxhddec: Check that the frame is interlaced before using cur_field
- avformat/mov: Disallow ".." in dref unless use_absolute_path is set
- avformat/mov: Check for string truncation in mov_open_dref()
- ac3_fixed: fix out-of-bound read
- avcodec/012v: redesign main loop
- avcodec/012v: Check dimensions more completely
- asfenc: fix leaking asf->index_ptr on error
- avcodec/options_table: remove extradata_size from the AVOptions table
- ffmdec: limit the backward seek to the last resync position
- ffmdec: make sure the time base is valid
- ffmdec: fix infinite loop at EOF
- avcodec/tiff: move bpp check to after "end:"
- avcodec/opusdec: Fix delayed sample value
- avcodec/utils: Align YUV411 by as much as the other YUV variants
- vp9: fix segmentation map retention with threading enabled.
- doc/protocols/tcp: fix units of listen_timeout option value, from microseconds to milliseconds
- fix VP9 packet decoder returning 0 instead of the used data size
- avformat/bit: only accept the g729 codec and 1 channel
- avformat/adxdec: check avctx->channels for invalid values
- Fix buffer_size argument to init_put_bits() in multiple encoders.
- mips/acelp_filters: fix incorrect register constraint
- avcodec/hevc_ps: Sanity checks for some log2_* values
- avcodec/zmbv: Check len before reading in decode_frame()
- avcodec/snowdec: Fix ref value check
- swscale/utils: More carefully merge and clear coefficients outside the input
- avcodec/a64multienc: fix use of uninitialized values in to_meta_with_crop
- avcodec/a64multienc: don't set incorrect packet size
- webp: ensure that each transform is only used once
- avcodec/hevc_ps: More complete window reset
- vp9: make above buffer pointer 32-byte aligned.
- avformat/rm: limit packet size
- avcodec/webp: validate the distance prefix code
- avcodec/gif: fix off by one in column offsetting finding


version 2.4.7:
- avcodec/flac_parser: fix handling EOF if no headers are found
- avfilter/vf_framepack: Check and update frame_rate
- avcodec/hevc: Fix handling of skipped_bytes() reallocation failures
- qpeg: avoid pointless invalid memcpy()
- avcodec/arm/videodsp_armv5te: Fix linking failure with "g++ -shared -D__STDC_CONSTANT_MACROS -o test.so ... libavcodec.a"
- avcodec/mjpegdec: Skip blocks which are outside the visible area
- lavc/aarch64: Do not use the neon horizontal chroma loop filter for H.264 4:2:2. (cherry picked from commit 4faea46bd906b3897018736208123aa36c3f45d5)
- avcodec/h264_slice: assert that reinit does not occur after the first slice
- avcodec/h264_slice: ignore SAR changes in slices after the first
- avcodec/h264_slice: Check picture structure before setting the related fields
- avcodec/h264_slice: Do not change frame_num after the first slice
- avutil/opt: Fix type used to access AV_OPT_TYPE_SAMPLE_FMT
- avutil/opt: Fix types used to access AV_OPT_TYPE_PIXEL_FMT
- avcodec/h264: Be more strict on rejecting pps/sps changes
- avcodec/h264: Be more strict on rejecting pps_id changes
- avcodec/h264_ps: More completely check the bit depths
- avformat/thp: Check av_get_packet() for failure not only for partial output
- swscale/utils: Limit filter shifting so as not to read from prior the array
- avcodec/mpegvideo_motion: Fix gmc chroma dimensions
- avcodec/mjpegdec: Check number of components for JPEG-LS
- avcodec/mjpegdec: Check escape sequence validity
- avformat/mpc8: Use uint64_t in *_get_v() to avoid undefined behavior
- avformat/mpc8: fix broken pointer math
- avformat/mpc8: fix hang with fuzzed file
- avformat/tta: fix crash with corrupted files

version 2.4.6:
- doc/examples: fix lib math dep for decoding_encoding
- avformat/movenc: workaround bug in "PathScale EKOPath(tm) Compiler Suite Version 4.0.12.1"
- vp9: fix parser return values in error case
- ffmpeg: Clear error message array at init.
- avcodec/dvdsubdec: fix accessing dangling pointers
- avcodec/dvdsubdec: error on bitmaps with size 0
- avformat/mov: Fix mixed declaration and statement warning
- cmdutils: Use 64bit for file size/offset related variable in cmdutils_read_file()
- avformat/utils: Clear pointer in ff_alloc_extradata() to avoid leaving a stale pointer in memory
- avformat/matroskadec: Use av_freep() to avoid leaving stale pointers in memory
- lavfi: check av_strdup() return value
- mov: Fix negative size calculation in mov_read_default().
- avformat/mov: fix integer overflow in mov_read_udta_string()
- mov: Avoid overflow with mov_metadata_raw()
- avcodec/dvdsubdec: fix out of bounds accesses
- avfilter/vf_sab: fix filtering tiny images
- avformat/flvdec: Increase string array size
- avformat/flvdec: do not inject dts=0 metadata packets which failed to be parsed into a new data stream
- avformat/cdxl: Fix integer overflow of image_size
- avformat/segment: Use av_freep() avoid leaving stale pointers in memory
- avformat/mov: Fix memleaks for duplicate STCO/CO64/STSC atoms
- mov: avoid a memleak when multiple stss boxes are presen

version 2.4.5:
- lavu/frame: fix malloc error path in av_frame_copy_props()
- avformat/utils: Do not update programs streams from program-less streams in update_wrap_reference()
- avformat/aviobuf: Check that avio_seek() target is non negative
- swresample/soxr_resample: fix error handling
- avformat/flvdec: fix potential use of uninitialized variables
- avformat/matroskadec: fix handling of recursive SeekHead elements
- doc/examples/transcoding: check encoder before using it
- swscale/x86/rgb2rgb_template: fix crash with tiny size and nv12 output
- avformat/rmdec: Check codec_data_size
- avformat/aviobuf: Fix infinite loop in ff_get_line()
- vc1: Do not assume seek happens after decoding
- mmvideo: check frame dimensions
- jvdec: check frame dimensions
- avcodec/indeo3: ensure offsets are non negative
- avcodec/h264: Check *log2_weight_denom
- avcodec/hevc_ps: Check diff_cu_qp_delta_depth
- avcodec/h264: Clear delayed_pic on deallocation
- avcodec/hevc: clear filter_slice_edges() on allocation
- avcodec/dcadec: Check that the added xch channel isnt already there
- avcodec/indeo3: use signed variables to avoid underflow
- swscale: increase yuv2rgb table headroom
- avformat/mov: fix integer overflow of size
- avformat/mov: check atom nesting depth
- avcodec/utvideodec: Fix handling of slice_height=0
- avcodec/vmdvideo: Check len before using it in method 3
- avformat/flvdec: Use av_freep() avoid leaving stale pointers in memory
- avformat/hdsenc: Use av_freep() avoid leaving stale pointers in memory
- configure: create the tests directory like the doc directory
- v4l2: Make use of the VIDIOC_ENUM_FRAMESIZES ioctl on OpenBSD
- avcodec/motion_est: use 2x8x8 for interlaced qpel
- Treat all '*.pnm' files as non-text file

version 2.4.4:
- avformat: replace some odd 30-60 rates by higher less odd ones in  get_std_framerate()
- swscale: fix yuv2yuvX_8 assembly on x86
- avcodec/hevc_ps: Check num_long_term_ref_pics_sps
- avcodec/mjpegdec: Fix integer overflow in shift
- avcodec/hevc_ps: Check return code from pps_range_extensions()
- avcodec/rawdec: Check the return code of avpicture_get_size()
- avcodec/pngdec: Check IHDR/IDAT order
- avcodec/flacdec: Call ff_flacdsp_init() unconditionally
- avcodec/utils: Check that the data is complete in avpriv_bprint_to_extradata()
- avcodec/mjpegdec: Fix context fields becoming inconsistent
- avcodec/mjpegdec: Check for pixfmtid 0x42111100 || 0x24111100 with more than 8 bits
- swscale/x86/rgb2rgb_template: handle the first 2 lines with C in rgb24toyv12_*()
- doc/APIchanges: Fix some wrong versions
- avformat/hlsenc: Free context after hls_append_segment
- avcodec/mpeg4video_parser: fix spurious extradata parse warnings
- lavu/opt: fix av_opt_get function
- avcodec/wmaprodec: Fix integer overflow in sfb_offsets initialization
- avcodec/utvideodec: fix assumtation that slice_height >= 1
- avcodec/options_table fix min of audio channels and sample rate
- libavutil/thread.h: Support OS/2 threads
- fix Makefile objects for pulseaudio support
- opusdec: make sure all substreams have the same number of coded samples
- lavu: add wrappers for the pthreads mutex API
- avformat/avidec: fix handling dv in avi
- avfilter/vf_lut: gammaval709()
- cinedec: report white balance gain coefficients using metadata
- swscale/utils: support bayer input + scaling, and bayer input + any supported output
- swscale: support internal scaler cascades
- avformat/dtsdec: dts_probe: check reserved bit, check lfe, check sr_code similarity
- avformat/segment: export inner muxer timebase
- Remove fminf() emulation, fix build issues
- avcodec/mpegaudio_parser: fix off by 1 error in bitrate calculation
- Use -fno-optimize-sibling-calls on parisc also for gcc 4.9.
- ffmpeg_opt: store canvas size in decoder context
- avcodec/mpeg12dec: do not trust AVCodecContext input dimensions

version 2.4.3:
- avcodec/svq1dec: zero terminate embedded message before printing
- avcodec/cook: check that the subpacket sizes fit in block_align
- avcodec/g2meet: check tile dimensions to avoid integer overflow
- avcodec/utils: Align dimensions by at least their chroma sub-sampling factors.
- avcodec/dnxhddec: treat pix_fmt like width/height
- avcodec/dxa: check dimensions
- avcodec/dirac_arith: fix integer overflow
- avcodec/diracdec: Tighter checks on CODEBLOCKS_X/Y
- avcodec/diracdec: Use 64bit in calculation of codeblock coordinates
- avcodec/sgidec: fix count check
- avcodec/sgidec: fix linesize for 16bit
- avcodec/hevc_ps: Check default display window bitstream and skip if invalid
- avcodec/tiffenc: properly compute packet size
- lavd: export all symbols with av_ prefix
- avformat/mxfdec: Fix termination of mxf_data_essence_container_uls
- postproc: fix qp count
- postproc/postprocess: fix quant store for fq mode
- vf_drawtext: add missing clear of pointers after av_expr_free()
- utvideoenc: properly set slice height/last line
- swresample: fix sample drop loop end condition
- resample: Avoid off-by-1 errors in PTS calcs.
- imc: fix order of operations in coefficients read
- hevc_mvs: make sure to always initialize the temporal MV fully
- hevc_mvs: initialize the temporal MV in case of missing reference

version 2.4.2:
- avcodec/on2avc: Check number of channels
- avcodec/hevc: fix chroma transform_add size
- avcodec/h264: Check mode before considering mixed mode intra prediction
- avformat/mpegts: use a padded buffer in read_sl_header()
- avformat/mpegts: Check desc_len / get8() return code
- avcodec/vorbisdec: Fix off by 1 error in ptns_to_read
- sdp: add support for H.261
- avcodec/svq3: Do not memcpy AVFrame
- avcodec/smc: fix off by 1 error
- avcodec/qpeg: fix off by 1 error in MV bounds check
- avcodec/gifdec: factorize interleave end handling out
- avcodec/cinepak: fix integer underflow
- avcodec/pngdec: Check bits per pixel before setting monoblack pixel format
- avcodec/pngdec: Calculate MPNG bytewidth more defensively
- avcodec/tiff: more completely check bpp/bppcount
- avcodec/mmvideo: Bounds check 2nd line of HHV Intra blocks
- avcodec/h263dec: Fix decoding messenger.h263
- avcodec/utils: Add case for jv to avcodec_align_dimensions2()
- avcodec/mjpegdec: check bits per pixel for changes similar to dimensions
- avcodec/jpeglsdec: Check run value more completely in ls_decode_line()
- avformat/hlsenc: export inner muxer timebase
- configure: add noexecstack to linker options if supported.
- avcodec/ac3enc_template: fix out of array read
- avutil/x86/cpu: fix cpuid sub-leaf selection
- avformat/img2dec: enable generic seeking for image pipes
- avformat/img2dec: initialize pkt->pos for image pipes
- avformat/img2dec: pass error code and signal EOF
- avformat/img2dec: fix error code at EOF for pipes
- libavutil/opt: fix av_opt_set_channel_layout() to access correct memory address
- tests/fate-run.sh: Cat .err file in case of error with V>0
- avformat/riffenc: Filter out "BottomUp" in ff_put_bmp_header()
- avcodec/webp: fix default palette color 0xff000000 -> 0x00000000
- avcodec/asvenc: fix AAN scaling
- Fix compile error on arm4/arm5 platform


version 2.4.1:
- swscale: Allow chroma samples to be above and to the left of luma samples
- avcodec/libilbc: support for latest git of libilbc
- avcodec/webp: treat out-of-bound palette index as translucent black
- vf_deshake: rename Transform.vector to Transform.vec to avoid compiler confusion
- apetag: Fix APE tag size check
- tools/crypto_bench: fix build when AV_READ_TIME is unavailable


version 2.4:
- Icecast protocol
- ported lenscorrection filter from frei0r filter
- large optimizations in dctdnoiz to make it usable
- ICY metadata are now requested by default with the HTTP protocol
- support for using metadata in stream specifiers in fftools
- LZMA compression support in TIFF decoder
- support for H.261 RTP payload format (RFC 4587)
- HEVC/H.265 RTP payload format (draft v6) depacketizer
- added codecview filter to visualize information exported by some codecs
- Matroska 3D support thorugh side data
- HTML generation using texi2html is deprecated in favor of makeinfo/texi2any
- silenceremove filter


version 2.3:
- AC3 fixed-point decoding
- shuffleplanes filter
- subfile protocol
- Phantom Cine demuxer
- replaygain data export
- VP7 video decoder
- Alias PIX image encoder and decoder
- Improvements to the BRender PIX image decoder
- Improvements to the XBM decoder
- QTKit input device
- improvements to OpenEXR image decoder
- support decoding 16-bit RLE SGI images
- GDI screen grabbing for Windows
- alternative rendition support for HTTP Live Streaming
- AVFoundation input device
- Direct Stream Digital (DSD) decoder
- Magic Lantern Video (MLV) demuxer
- On2 AVC (Audio for Video) decoder
- support for decoding through DXVA2 in ffmpeg
- libbs2b-based stereo-to-binaural audio filter
- libx264 reference frames count limiting depending on level
- native Opus decoder
- display matrix export and rotation API
- WebVTT encoder
- showcqt multimedia filter
- zoompan filter
- signalstats filter
- hqx filter (hq2x, hq3x, hq4x)
- flanger filter
- Image format auto-detection
- LRC demuxer and muxer
- Samba protocol (via libsmbclient)
- WebM DASH Manifest muxer
- libfribidi support in drawtext


version 2.2:

- HNM version 4 demuxer and video decoder
@@ -351,7 +29,6 @@ version 2.2:
- libx265 encoder
- dejudder filter
- Autodetect VDA like all other hardware accelerations
- aliases and defaults for Ogg subtypes (opus, spx)


version 2.1:
@@ -539,7 +216,7 @@ version 1.1:
- JSON captions for TED talks decoding support
- SOX Resampler support in libswresample
- aselect filter
- SGI RLE 8-bit / Silicon Graphics RLE 8-bit video decoder
- SGI RLE 8-bit decoder
- Silicon Graphics Motion Video Compressor 1 & 2 decoder
- Silicon Graphics Movie demuxer
- apad filter
@@ -583,9 +260,7 @@ version 1.0:
- RTMPE protocol support
- RTMPTE protocol support
- showwaves and showspectrum filter
- LucasArts SMUSH SANM playback support
- LucasArts SMUSH VIMA audio decoder (ADPCM)
- LucasArts SMUSH demuxer
- LucasArts SMUSH playback support
- SAMI, RealText and SubViewer demuxers and decoders
- Heart Of Darkness PAF playback support
- iec61883 device
@@ -709,7 +384,6 @@ version 0.10:
- ffwavesynth decoder
- aviocat tool
- ffeval tool
- support encoding and decoding 4-channel SGI images


version 0.9:

INSTALL: 15 changed lines (Normal file)

@@ -0,0 +1,15 @@

1) Type './configure' to create the configuration. A list of configure
options is printed by running 'configure --help'.

'configure' can be launched from a directory different from the FFmpeg
sources to build the objects out of tree. To do this, use an absolute
path when launching 'configure', e.g. '/ffmpegdir/ffmpeg/configure'.

2) Then type 'make' to build FFmpeg. GNU Make 3.81 or later is required.

3) Type 'make install' to install all binaries and libraries you built.

NOTICE

 - Non system dependencies (e.g. libx264, libvpx) are disabled by default.
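
The INSTALL text above boils down to three commands. A minimal out-of-tree sketch, assuming the sources live under /ffmpegdir/ffmpeg (the example path used in that file) and that a separate, writable build directory is used:

```sh
# Out-of-tree build, following the three numbered steps in INSTALL above.
# "/ffmpegdir/ffmpeg" is the example source path from that file; adjust as needed.
mkdir -p ffmpeg-build && cd ffmpeg-build
/ffmpegdir/ffmpeg/configure    # 'configure --help' lists the available options
make                           # GNU Make 3.81 or later is required
make install                   # installs the binaries and libraries you built
```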

INSTALL.md: 17 changed lines

@@ -1,17 +0,0 @@
#Installing FFmpeg:

1. Type `./configure` to create the configuration. A list of configure
options is printed by running `configure --help`.

    `configure` can be launched from a directory different from the FFmpeg
sources to build the objects out of tree. To do this, use an absolute
path when launching `configure`, e.g. `/ffmpegdir/ffmpeg/configure`.

2. Then type `make` to build FFmpeg. GNU Make 3.81 or later is required.

3. Type `make install` to install all binaries and libraries you built.

NOTICE
------

 - Non system dependencies (e.g. libx264, libvpx) are disabled by default.

@@ -1,4 +1,4 @@
#FFmpeg:
FFmpeg:

Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
@@ -10,12 +10,11 @@ version 2 or later (GPL v2+). See the file COPYING.GPLv2 for details. None of
these parts are used by default, you have to explicitly pass --enable-gpl to
configure to activate them. In this case, FFmpeg's license changes to GPL v2+.

Specifically, the GPL parts of FFmpeg are:
Specifically, the GPL parts of FFmpeg are

- libpostproc
- libmpcodecs
- optional x86 optimizations in the files
  libavcodec/x86/flac_dsp_gpl.asm
  libavcodec/x86/idct_mmx.c
- libutvideo encoding/decoding wrappers in
  libavcodec/libutvideo*.cpp
@@ -34,7 +33,6 @@ Specifically, the GPL parts of FFmpeg are:
    - vf_geq.c
    - vf_histeq.c
    - vf_hqdn3d.c
    - vf_interlace.c
    - vf_kerndeint.c
    - vf_mcdeint.c
    - vf_mp.c
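
As the license text above notes, the GPL parts are disabled by default and require an explicit configure switch. A minimal sketch, reusing the hypothetical /ffmpegdir/ffmpeg source path from the INSTALL example earlier:

```sh
# Enable the GPL-licensed parts listed above; the resulting build is then
# licensed GPL v2+ rather than LGPL v2.1+, as the text explains.
/ffmpegdir/ffmpeg/configure --enable-gpl
make
```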
							
								
								
									
										61
									
								
								MAINTAINERS
									
									
									
									
									
								
							
							
						
						
									
										61
									
								
								MAINTAINERS
									
									
									
									
									
								
							@@ -44,8 +44,8 @@ Miscellaneous Areas
 | 
			
		||||
===================
 | 
			
		||||
 | 
			
		||||
documentation                           Stefano Sabatini, Mike Melanson, Timothy Gu
 | 
			
		||||
build system (configure, makefiles)     Diego Biurrun, Mans Rullgard
 | 
			
		||||
project server                          Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser
 | 
			
		||||
build system (configure,Makefiles)      Diego Biurrun, Mans Rullgard
 | 
			
		||||
project server                          Árpád Gereöffy, Michael Niedermayer, Reimar Döffinger, Alexander Strasser
 | 
			
		||||
presets                                 Robert Swain
 | 
			
		||||
metadata subsystem                      Aurelien Jacobs
 | 
			
		||||
release management                      Michael Niedermayer
 | 
			
		||||
@@ -54,10 +54,8 @@ release management                      Michael Niedermayer
 | 
			
		||||
Communication
 | 
			
		||||
=============
 | 
			
		||||
 | 
			
		||||
website                                 Deby Barbara Lepage
 | 
			
		||||
fate.ffmpeg.org                         Timothy Gu
 | 
			
		||||
Trac bug tracker                        Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
 | 
			
		||||
mailing lists                           Michael Niedermayer, Baptiste Coudurier, Lou Logan
 | 
			
		||||
website                                 Robert Swain, Lou Logan
 | 
			
		||||
mailinglists                            Michael Niedermayer, Baptiste Coudurier, Lou Logan
 | 
			
		||||
Google+                                 Paul B Mahol, Michael Niedermayer, Alexander Strasser
 | 
			
		||||
Twitter                                 Lou Logan
 | 
			
		||||
Launchpad                               Timothy Gu
 | 
			
		||||
@@ -75,7 +73,6 @@ Other:
 | 
			
		||||
  bprint                                Nicolas George
 | 
			
		||||
  bswap.h
 | 
			
		||||
  des                                   Reimar Doeffinger
 | 
			
		||||
  dynarray.h                            Nicolas George
 | 
			
		||||
  eval.c, eval.h                        Michael Niedermayer
 | 
			
		||||
  float_dsp                             Loren Merritt
 | 
			
		||||
  hash                                  Reimar Doeffinger
 | 
			
		||||
@@ -132,7 +129,6 @@ Generic Parts:
 | 
			
		||||
    tableprint.c, tableprint.h          Reimar Doeffinger
 | 
			
		||||
  fixed point FFT:
 | 
			
		||||
    fft*                                Zeljko Lukac
 | 
			
		||||
  Text Subtitles                        Clément Bœsch
 | 
			
		||||
 | 
			
		||||
Codecs:
 | 
			
		||||
  4xm.c                                 Michael Niedermayer
 | 
			
		||||
@@ -166,13 +162,11 @@ Codecs:
 | 
			
		||||
  dnxhd*                                Baptiste Coudurier
 | 
			
		||||
  dpcm.c                                Mike Melanson
 | 
			
		||||
  dv.c                                  Roman Shaposhnik
 | 
			
		||||
  dvbsubdec.c                           Anshul Maheshwari
 | 
			
		||||
  dxa.c                                 Kostya Shishkov
 | 
			
		||||
  eacmv*, eaidct*, eat*                 Peter Ross
 | 
			
		||||
  exif.c, exif.h                        Thilo Borgmann
 | 
			
		||||
  ffv1*                                 Michael Niedermayer
 | 
			
		||||
  ffv1.c                                Michael Niedermayer
 | 
			
		||||
  ffwavesynth.c                         Nicolas George
 | 
			
		||||
  fic.c                                 Derek Buitenhuis
 | 
			
		||||
  flac*                                 Justin Ruggles
 | 
			
		||||
  flashsv*                              Benjamin Larsson
 | 
			
		||||
  flicvideo.c                           Mike Melanson
 | 
			
		||||
@@ -182,7 +176,7 @@ Codecs:
 | 
			
		||||
  h261*                                 Michael Niedermayer
 | 
			
		||||
  h263*                                 Michael Niedermayer
 | 
			
		||||
  h264*                                 Loren Merritt, Michael Niedermayer
 | 
			
		||||
  huffyuv*                              Michael Niedermayer, Christophe Gisquet
 | 
			
		||||
  huffyuv.c                             Michael Niedermayer
 | 
			
		||||
  idcinvideo.c                          Mike Melanson
 | 
			
		||||
  imc*                                  Benjamin Larsson
 | 
			
		||||
  indeo2*                               Kostya Shishkov
 | 
			
		||||
@@ -242,12 +236,12 @@ Codecs:
 | 
			
		||||
  rtjpeg.c, rtjpeg.h                    Reimar Doeffinger
 | 
			
		||||
  rv10.c                                Michael Niedermayer
 | 
			
		||||
  rv3*                                  Kostya Shishkov
 | 
			
		||||
  rv4*                                  Kostya Shishkov, Christophe Gisquet
 | 
			
		||||
  rv4*                                  Kostya Shishkov
 | 
			
		||||
  s3tc*                                 Ivo van Poorten
 | 
			
		||||
  smacker.c                             Kostya Shishkov
 | 
			
		||||
  smc.c                                 Mike Melanson
 | 
			
		||||
  smvjpegdec.c                          Ash Hughes
 | 
			
		||||
  snow*                                 Michael Niedermayer, Loren Merritt
 | 
			
		||||
  snow.c                                Michael Niedermayer, Loren Merritt
 | 
			
		||||
  sonic.c                               Alex Beregszaszi
 | 
			
		||||
  srt*                                  Aurelien Jacobs
 | 
			
		||||
  sunrast.c                             Ivo van Poorten
 | 
			
		||||
@@ -266,13 +260,13 @@ Codecs:
 | 
			
		||||
  v410*.c                               Derek Buitenhuis
 | 
			
		||||
  vb.c                                  Kostya Shishkov
 | 
			
		||||
  vble.c                                Derek Buitenhuis
 | 
			
		||||
  vc1*                                  Kostya Shishkov, Christophe Gisquet
 | 
			
		||||
  vc1*                                  Kostya Shishkov
 | 
			
		||||
  vcr1.c                                Michael Niedermayer
 | 
			
		||||
  vda_h264_dec.c                        Xidorn Quan
 | 
			
		||||
  vima.c                                Paul B Mahol
 | 
			
		||||
  vmnc.c                                Kostya Shishkov
 | 
			
		||||
  vorbisdec.c                           Denes Balatoni, David Conrad
 | 
			
		||||
  vorbisenc.c                           Oded Shimon
 | 
			
		||||
  vorbis_dec.c                          Denes Balatoni, David Conrad
 | 
			
		||||
  vorbis_enc.c                          Oded Shimon
 | 
			
		||||
  vp3*                                  Mike Melanson
 | 
			
		||||
  vp5                                   Aurelien Jacobs
 | 
			
		||||
  vp6                                   Aurelien Jacobs
 | 
			
		||||
@@ -308,20 +302,16 @@ libavdevice
 | 
			
		||||
    libavdevice/avdevice.h
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  avfoundation.m                        Thilo Borgmann
 | 
			
		||||
  dshow.c                               Roger Pack (CC rogerdpack@gmail.com)
 | 
			
		||||
  dshow.c                               Roger Pack
 | 
			
		||||
  fbdev_enc.c                           Lukasz Marek
 | 
			
		||||
  gdigrab.c                             Roger Pack (CC rogerdpack@gmail.com)
 | 
			
		||||
  iec61883.c                            Georg Lippitsch
 | 
			
		||||
  lavfi                                 Stefano Sabatini
 | 
			
		||||
  libdc1394.c                           Roman Shaposhnik
 | 
			
		||||
  opengl_enc.c                          Lukasz Marek
 | 
			
		||||
  pulse_audio_enc.c                     Lukasz Marek
 | 
			
		||||
  qtkit.m                               Thilo Borgmann
 | 
			
		||||
  sdl                                   Stefano Sabatini
 | 
			
		||||
  v4l2.c                                Giorgio Vazzana
 | 
			
		||||
  v4l2.c                                Luca Abeni
 | 
			
		||||
  vfwcap.c                              Ramiro Polla
 | 
			
		||||
  xv.c                                  Lukasz Marek
 | 
			
		||||
 | 
			
		||||
libavfilter
 | 
			
		||||
===========
 | 
			
		||||
@@ -343,9 +333,7 @@ Filters:
 | 
			
		||||
  af_compand.c                          Paul B Mahol
 | 
			
		||||
  af_ladspa.c                           Paul B Mahol
 | 
			
		||||
  af_pan.c                              Nicolas George
 | 
			
		||||
  af_silenceremove.c                    Paul B Mahol
 | 
			
		||||
  avf_avectorscope.c                    Paul B Mahol
 | 
			
		||||
  avf_showcqt.c                         Muhammad Faiz
 | 
			
		||||
  vf_blend.c                            Paul B Mahol
 | 
			
		||||
  vf_colorbalance.c                     Paul B Mahol
 | 
			
		||||
  vf_dejudder.c                         Nicholas Robbins
 | 
			
		||||
@@ -353,10 +341,7 @@ Filters:
 | 
			
		||||
  vf_drawbox.c/drawgrid                 Andrey Utkin
 | 
			
		||||
  vf_extractplanes.c                    Paul B Mahol
 | 
			
		||||
  vf_histogram.c                        Paul B Mahol
 | 
			
		||||
  vf_hqx.c                              Clément Bœsch
 | 
			
		||||
  vf_idet.c                             Pascal Massimino
 | 
			
		||||
  vf_il.c                               Paul B Mahol
 | 
			
		||||
  vf_lenscorrection.c                   Daniel Oberhoff
 | 
			
		||||
  vf_mergeplanes.c                      Paul B Mahol
 | 
			
		||||
  vf_psnr.c                             Paul B Mahol
 | 
			
		||||
  vf_scale.c                            Michael Niedermayer
 | 
			
		||||
@@ -426,7 +411,6 @@ Muxers/Demuxers:
 | 
			
		||||
  matroska.c                            Aurelien Jacobs
 | 
			
		||||
  matroskadec.c                         Aurelien Jacobs
 | 
			
		||||
  matroskaenc.c                         David Conrad
 | 
			
		||||
  matroska subtitles (matroskaenc.c)    John Peebles
 | 
			
		||||
  metadata*                             Aurelien Jacobs
 | 
			
		||||
  mgsts.c                               Paul B Mahol
 | 
			
		||||
  microdvd*                             Aurelien Jacobs
 | 
			
		||||
@@ -436,15 +420,14 @@ Muxers/Demuxers:
 | 
			
		||||
  mpc.c                                 Kostya Shishkov
 | 
			
		||||
  mpeg.c                                Michael Niedermayer
 | 
			
		||||
  mpegenc.c                             Michael Niedermayer
 | 
			
		||||
  mpegts.c                              Marton Balint
 | 
			
		||||
  mpegtsenc.c                           Baptiste Coudurier
 | 
			
		||||
  mpegts*                               Baptiste Coudurier
 | 
			
		||||
  msnwc_tcp.c                           Ramiro Polla
 | 
			
		||||
  mtv.c                                 Reynaldo H. Verdejo Pinochet
 | 
			
		||||
  mxf*                                  Baptiste Coudurier
 | 
			
		||||
  mxfdec.c                              Tomas Härdin
 | 
			
		||||
  nistspheredec.c                       Paul B Mahol
 | 
			
		||||
  nsvdec.c                              Francois Revol
 | 
			
		||||
  nut*                                  Michael Niedermayer
 | 
			
		||||
  nut.c                                 Michael Niedermayer
  nuv.c                                 Reimar Doeffinger
  oggdec.c, oggdec.h                    David Conrad
  oggenc.c                              Baptiste Coudurier
@@ -461,15 +444,12 @@ Muxers/Demuxers:
  rmdec.c, rmenc.c                      Ronald S. Bultje, Kostya Shishkov
  rtmp*                                 Kostya Shishkov
  rtp.c, rtpenc.c                       Martin Storsjo
  rtpdec_h261.*, rtpenc_h261.*          Thomas Volkert
  rtpdec_hevc.*                         Thomas Volkert
  rtpdec_asf.*                          Ronald S. Bultje
  rtpenc_mpv.*, rtpenc_aac.*            Martin Storsjo
  rtsp.c                                Luca Barbato
  sbgdec.c                              Nicolas George
  sdp.c                                 Martin Storsjo
  segafilm.c                            Mike Melanson
  segment.c                             Stefano Sabatini
  siff.c                                Kostya Shishkov
  smacker.c                             Kostya Shishkov
  smjpeg*                               Paul B Mahol
@@ -482,7 +462,6 @@ Muxers/Demuxers:
  voc.c                                 Aurelien Jacobs
  wav.c                                 Michael Niedermayer
  wc3movie.c                            Mike Melanson
  webm dash (matroskaenc.c)             Vignesh Venkatasubramanian
  webvtt*                               Matthew J Heaney
  westwood.c                            Mike Melanson
  wtv.c                                 Peter Ross
@@ -524,8 +503,6 @@ Amiga / PowerPC                         Colin Ward
Linux / PowerPC                         Luca Barbato
Windows MinGW                           Alex Beregszaszi, Ramiro Polla
Windows Cygwin                          Victor Paesa
Windows MSVC                            Matthew Oliver
Windows ICL                             Matthew Oliver
ADI/Blackfin DSP                        Marc Hoffman
Sparc                                   Roman Shaposhnik
x86                                     Michael Niedermayer
@@ -534,8 +511,8 @@ x86                                     Michael Niedermayer
Releases
========

2.4                                     Michael Niedermayer
2.2                                     Michael Niedermayer
2.1                                     Michael Niedermayer
1.2                                     Michael Niedermayer

If you want to maintain an older release, please contact us
@@ -552,7 +529,7 @@ Attila Kinali                 11F0 F9A6 A1D2 11F6 C745 D10C 6520 BCDD F2DF E765
Baptiste Coudurier            8D77 134D 20CC 9220 201F C5DB 0AC9 325C 5C1A BAAA
Ben Littler                   3EE3 3723 E560 3214 A8CD 4DEB 2CDB FCE7 768C 8D2C
Benoit Fouet                  B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
Clément Bœsch                 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
Bœsch Clément                 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
Daniel Verkamp                78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
Diego Biurrun                 8227 1E31 B6D9 4994 7427 E220 9CAE D6CC 4757 FCC5
FFmpeg release signing key    FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
@@ -567,14 +544,12 @@ Michael Niedermayer           9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
Nicolas George                24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Panagiotis Issaris            6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Peter Ross                    A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
Reimar Doeffinger             C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
Reimar Döffinger              C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
Reinhard Tartler              9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
Reynaldo H. Verdejo Pinochet  6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
Robert Swain                  EE7A 56EA 4A81 A7B5 2001 A521 67FA 362D A2FC 3E71
Sascha Sommer                 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
Stefano Sabatini              0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
Stephan Hilb                  4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
Tiancheng "Timothy" Gu        9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
Tim Nicholson                 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83
Tomas Härdin                  A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
Wei Gao                       4269 7741 857A 0E60 9EC5 08D2 4744 4EFA 62C1 87B9

Makefile (31 changed lines)

@@ -4,7 +4,6 @@ include config.mak
vpath %.c    $(SRC_PATH)
vpath %.cpp  $(SRC_PATH)
vpath %.h    $(SRC_PATH)
vpath %.m    $(SRC_PATH)
vpath %.S    $(SRC_PATH)
vpath %.asm  $(SRC_PATH)
vpath %.rc   $(SRC_PATH)
@@ -30,23 +29,19 @@ $(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_o

OBJS-ffmpeg                   += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
OBJS-ffmpeg-$(CONFIG_VDA)     += ffmpeg_vda.o

TESTTOOLS   = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
HOSTPROGS  := $(TESTTOOLS:%=tests/%) doc/print_options
TOOLS       = qt-faststart trasher uncoded_frame
TOOLS-$(CONFIG_ZLIB) += cws2fws

# $(FFLIBS-yes) needs to be in linking order
FFLIBS-$(CONFIG_AVDEVICE)   += avdevice
FFLIBS-$(CONFIG_AVFILTER)   += avfilter
FFLIBS-$(CONFIG_AVFORMAT)   += avformat
FFLIBS-$(CONFIG_AVCODEC)    += avcodec
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
FFLIBS-$(CONFIG_POSTPROC)   += postproc
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
FFLIBS-$(CONFIG_SWSCALE)    += swscale
FFLIBS-$(CONFIG_AVCODEC)  += avcodec
FFLIBS-$(CONFIG_POSTPROC) += postproc
FFLIBS-$(CONFIG_SWRESAMPLE)+= swresample
FFLIBS-$(CONFIG_SWSCALE)  += swscale

FFLIBS := avutil

@@ -63,7 +58,7 @@ FF_DEP_LIBS  := $(DEP_LIBS)
all: $(AVPROGS)

$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
	$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS)
	$(LD) $(LDFLAGS) $(LD_O) $^ $(ELIBS)

tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
@@ -77,8 +72,9 @@ config.h: .config

SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS      \
               HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS            \
               ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS     \
               ALTIVEC-OBJS MMX-OBJS YASM-OBJS                           \
               ARMV5TE-OBJS ARMV6-OBJS VFP-OBJS NEON-OBJS                \
               ALTIVEC-OBJS VIS-OBJS                                     \
               MMX-OBJS YASM-OBJS                                        \
               MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS  \
               OBJS SLIBOBJS HOSTOBJS TESTOBJS

@@ -92,7 +88,6 @@ $(foreach V,$(SUBDIR_VARS),$(eval $(call RESET,$(V))))
SUBDIR := $(1)/
include $(SRC_PATH)/$(1)/Makefile
-include $(SRC_PATH)/$(1)/$(ARCH)/Makefile
-include $(SRC_PATH)/$(1)/$(INTRINSICS)/Makefile
include $(SRC_PATH)/library.mak
endef

@@ -111,14 +106,14 @@ endef

$(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))

ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
ffprobe.o cmdutils.o : libavutil/ffversion.h

$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
	$(CP) $< $@
	$(STRIP) $@

%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
	$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
	$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)

OBJDIRS += tools

README (18 changed lines, new file)

@@ -0,0 +1,18 @@
FFmpeg README
-------------

1) Documentation
----------------

* Read the documentation in the doc/ directory in git.
  You can also view it online at http://ffmpeg.org/documentation.html

2) Licensing
------------

* See the LICENSE file.

3) Build and Install
--------------------

* See the INSTALL file.

README.md (40 changed lines)

@@ -1,40 +0,0 @@
FFmpeg README
=============

FFmpeg is a collection of libraries and tools to process multimedia content
such as audio, video, subtitles and related metadata.

## Libraries

* `libavcodec` provides implementations of a wide range of codecs.
* `libavformat` implements streaming protocols, container formats and basic I/O access (a short usage sketch follows this list).
* `libavutil` includes hashers, decompressors and miscellaneous utility functions.
* `libavfilter` provides a means to alter decoded audio and video through a chain of filters.
* `libavdevice` provides an abstraction to access capture and playback devices.
* `libswresample` implements audio mixing and resampling routines.
* `libswscale` implements color conversion and scaling routines.
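
As a hedged illustration of the libavformat bullet above (an editorial sketch, not part of the original README.md), this is roughly how the 2.x-era demuxing API is used to open a container and print its streams; the input path is hypothetical:

    /* sketch: open a container, read stream info, dump it (FFmpeg 2.x era API) */
    #include <libavformat/avformat.h>

    int main(void)
    {
        AVFormatContext *fmt = NULL;

        av_register_all();                          /* needed before FFmpeg 4.0 */
        if (avformat_open_input(&fmt, "input.mkv", NULL, NULL) < 0)  /* hypothetical file */
            return 1;
        if (avformat_find_stream_info(fmt, NULL) < 0) {
            avformat_close_input(&fmt);
            return 1;
        }
        av_dump_format(fmt, 0, "input.mkv", 0);     /* human-readable stream summary */
        avformat_close_input(&fmt);
        return 0;
    }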

## Tools

* [ffmpeg](http://ffmpeg.org/ffmpeg.html) is a command line toolbox to
  manipulate, convert and stream multimedia content.
* [ffplay](http://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
  multimedia content.
* Additional small tools such as `aviocat`, `ismindex` and `qt-faststart`.

## Documentation

The offline documentation is available in the **doc/** directory.

The online documentation is available in the main [website](http://ffmpeg.org)
and in the [wiki](http://trac.ffmpeg.org).

### Examples

Coding examples are available in the **doc/examples** directory.

## License

The FFmpeg codebase is mainly LGPL-licensed with optional components licensed under
GPL. Please refer to the LICENSE file for detailed information.

@@ -1,83 +0,0 @@
 ┌────────────────────────────────────────┐
 │ RELEASE NOTES for FFmpeg 2.4 "Fresnel" │
 └────────────────────────────────────────┘

   The FFmpeg Project proudly presents FFmpeg 2.4 "Fresnel", just 2 months
   after the release of 2.3. Since this wasn't a long time ago, the Changelog
   is a bit short this time.

   The most important thing in this release is the major version bump of the
   libraries.  This means that this release is neither ABI-compatible nor
   fully API-compatible. But on the other hand it is aligned with the Libav
   11 release series, and will as a result probably end up being maintained for
   a long time.

   As usual, if you have any questions on this release or any FFmpeg related
   topic, feel free to join us on the #ffmpeg IRC channel (on
   irc.freenode.net).

   ┌────────────────────────────┐
   │ 🔨  API Information         │
   └────────────────────────────┘

     FFmpeg 2.4 includes the following library versions:

       • libavutil      54.7.100
       • libavcodec     56.1.100
       • libavformat    56.4.101
       • libavdevice    56.0.100
       • libavfilter     5.1.100
       • libswscale      3.0.100
       • libswresample   1.1.100
       • libpostproc    53.0.100

     Important API changes since 2.3:

       • The new field mime_type was added to AVProbeData, which can
         cause crashes if it is not initialized (see the sketch after
         this section).
       • Some deprecated functions were removed.
       • The avfilter_graph_parse function was made compatible with Libav.
       • The Matroska demuxer now outputs verbatim ASS packets.

     Please refer to the doc/APIchanges file for more information.
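
     (Editorial aside, not part of the original release notes: the mime_type
     point above mirrors the doc/APIchanges guidance quoted later in this
     compare. A minimal sketch of the safe initialization when probing by
     hand; the buffer names are assumptions:)

         /* zero-initialize AVProbeData so the new mime_type field starts as NULL */
         AVProbeData pd = { 0 };        /* instead of an uninitialized 'AVProbeData pd;' */
         pd.filename = "input.bin";     /* hypothetical probe input */
         pd.buf      = probe_buffer;    /* assumed: padded buffer already read from the input */
         pd.buf_size = probe_buffer_size;
         AVInputFormat *fmt = av_probe_input_format(&pd, 1);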

 ┌────────────────────────────┐
 │ ★  List of New Features    │
 └────────────────────────────┘

   ┌────────────────────────────┐
   │ libavformat                │
   └────────────────────────────┘

    • Icecast protocol.
    • API for live metadata updates through event flags.
    • UTF-16 support in text subtitle formats.
    • The ASS muxer now reorders the Dialogue events properly.
    • Support for H.261 RTP payload format (RFC 4587)
    • HEVC/H.265 RTP payload format (draft v6) depacketizer

   ┌────────────────────────────┐
   │ libavfilter                │
   └────────────────────────────┘

    • Ported lenscorrection filter from frei0r filter.
    • Large optimizations in dctdnoiz to make it usable.
    • Added codecview filter to visualize information exported by some codecs.
    • Added silenceremove filter.

   ┌────────────────────────────┐
   │ libavutil                  │
   └────────────────────────────┘

    • Added clip() function in eval.

 ┌────────────────────────────┐
 │ ⚠  Behaviour changes       │
 └────────────────────────────┘

  • dctdnoiz filter now uses a block size of 8x8 instead of 16x16 by default
  • -vismv option is deprecated in favor of the codecview filter
  • libmodplug is now detected through pkg-config
  • HTML documentation generation through texi2html is deprecated in
    favor of makeinfo/texi2any
  • ICY metadata are now requested by default with the HTTP protocol

arch.mak (3 changed lines)

@@ -1,6 +1,5 @@
OBJS-$(HAVE_ARMV5TE) += $(ARMV5TE-OBJS) $(ARMV5TE-OBJS-yes)
OBJS-$(HAVE_ARMV6)   += $(ARMV6-OBJS)   $(ARMV6-OBJS-yes)
OBJS-$(HAVE_ARMV8)   += $(ARMV8-OBJS)   $(ARMV8-OBJS-yes)
OBJS-$(HAVE_VFP)     += $(VFP-OBJS)     $(VFP-OBJS-yes)
OBJS-$(HAVE_NEON)    += $(NEON-OBJS)    $(NEON-OBJS-yes)

@@ -11,5 +10,7 @@ OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS)  $(MIPSDSPR2-OBJS-yes)

OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)

OBJS-$(HAVE_VIS)     += $(VIS-OBJS)     $(VIS-OBJS-yes)

OBJS-$(HAVE_MMX)     += $(MMX-OBJS)     $(MMX-OBJS-yes)
OBJS-$(HAVE_YASM)    += $(YASM-OBJS)    $(YASM-OBJS-yes)

cmdutils.c (131 changed lines)

@@ -66,7 +66,6 @@ AVDictionary *swr_opts;
AVDictionary *format_opts, *codec_opts, *resample_opts;

static FILE *report_file;
static int report_file_level = AV_LOG_DEBUG;
int hide_banner = 0;

void init_opts(void)
@@ -105,10 +104,8 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
    av_log_default_callback(ptr, level, fmt, vl);
    av_log_format_line(ptr, level, fmt, vl2, line, sizeof(line), &print_prefix);
    va_end(vl2);
    if (report_file_level >= level) {
        fputs(line, report_file);
        fflush(report_file);
    }
    fputs(line, report_file);
    fflush(report_file);
}

static void (*program_exit)(int ret);
@@ -166,7 +163,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
    int first;

    first = 1;
    for (po = options; po->name; po++) {
    for (po = options; po->name != NULL; po++) {
        char buf[64];

        if (((po->flags & req_flags) != req_flags) ||
@@ -205,7 +202,7 @@ static const OptionDef *find_option(const OptionDef *po, const char *name)
    const char *p = strchr(name, ':');
    int len = p ? p - name : strlen(name);

    while (po->name) {
    while (po->name != NULL) {
        if (!strncmp(name, po->name, len) && strlen(po->name) == len)
            break;
        po++;
@@ -254,7 +251,7 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)

    win32_argv_utf8 = av_mallocz(sizeof(char *) * (win32_argc + 1) + buffsize);
    argstr_flat     = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1);
    if (!win32_argv_utf8) {
    if (win32_argv_utf8 == NULL) {
        LocalFree(argv_w);
        return;
    }
@@ -444,7 +441,7 @@ int locate_option(int argc, char **argv, const OptionDef *options,
             (po->name && !strcmp(optname, po->name)))
            return i;

        if (!po->name || po->flags & HAS_ARG)
        if (po->flags & HAS_ARG)
            i++;
    }
    return 0;
@@ -555,11 +552,6 @@ int opt_default(void *optctx, const char *opt, const char *arg)
        }
        consumed = 1;
    }
#else
    if (!consumed && !strcmp(opt, "sws_flags")) {
        av_log(NULL, AV_LOG_WARNING, "Ignoring %s %s, due to disabled swscale\n", opt, arg);
        consumed = 1;
    }
#endif
#if CONFIG_SWRESAMPLE
    swr_class = swr_get_class();
@@ -670,7 +662,7 @@ static void init_parse_context(OptionParseContext *octx,
    memset(octx, 0, sizeof(*octx));

    octx->nb_groups = nb_groups;
    octx->groups    = av_mallocz_array(octx->nb_groups, sizeof(*octx->groups));
    octx->groups    = av_mallocz(sizeof(*octx->groups) * octx->nb_groups);
    if (!octx->groups)
        exit_program(1);

@@ -842,17 +834,10 @@ int opt_loglevel(void *optctx, const char *opt, const char *arg)
    };
    char *tail;
    int level;
    int flags;
    int i;

    flags = av_log_get_flags();
    tail = strstr(arg, "repeat");
    if (tail)
        flags &= ~AV_LOG_SKIP_REPEATED;
    else
        flags |= AV_LOG_SKIP_REPEATED;

    av_log_set_flags(flags);
    av_log_set_flags(tail ? 0 : AV_LOG_SKIP_REPEATED);
    if (tail == arg)
        arg += 6 + (arg[6]=='+');
    if(tail && !*arg)
@@ -934,13 +919,6 @@ static int init_report(const char *env)
            av_free(filename_template);
            filename_template = val;
            val = NULL;
        } else if (!strcmp(key, "level")) {
            char *tail;
            report_file_level = strtol(val, &tail, 10);
            if (*tail) {
                av_log(NULL, AV_LOG_FATAL, "Invalid report file level\n");
                exit_program(1);
            }
        } else {
            av_log(NULL, AV_LOG_ERROR, "Unknown key '%s' in FFREPORT\n", key);
        }
@@ -1120,7 +1098,7 @@ void show_banner(int argc, char **argv, const OptionDef *options)
int show_version(void *optctx, const char *opt, const char *arg)
{
    av_log_set_callback(log_callback_help);
    print_program_info (SHOW_COPYRIGHT, AV_LOG_INFO);
    print_program_info (0           , AV_LOG_INFO);
    print_all_libs_info(SHOW_VERSION, AV_LOG_INFO);

    return 0;
@@ -1208,29 +1186,16 @@ int show_license(void *optctx, const char *opt, const char *arg)
    return 0;
}

static int is_device(const AVClass *avclass)
{
    if (!avclass)
        return 0;
    return avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT ||
           avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT ||
           avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT ||
           avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT ||
           avclass->category == AV_CLASS_CATEGORY_DEVICE_OUTPUT ||
           avclass->category == AV_CLASS_CATEGORY_DEVICE_INPUT;
}

static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only)
int show_formats(void *optctx, const char *opt, const char *arg)
{
    AVInputFormat *ifmt  = NULL;
    AVOutputFormat *ofmt = NULL;
    const char *last_name;
    int is_dev;

    printf("%s\n"
    printf("File formats:\n"
           " D. = Demuxing supported\n"
           " .E = Muxing supported\n"
           " --\n", device_only ? "Devices:" : "File formats:");
           " --\n");
    last_name = "000";
    for (;;) {
        int decode = 0;
@@ -1239,10 +1204,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
        const char *long_name = NULL;

        while ((ofmt = av_oformat_next(ofmt))) {
            is_dev = is_device(ofmt->priv_class);
            if (!is_dev && device_only)
                continue;
            if ((!name || strcmp(ofmt->name, name) < 0) &&
            if ((name == NULL || strcmp(ofmt->name, name) < 0) &&
                strcmp(ofmt->name, last_name) > 0) {
                name      = ofmt->name;
                long_name = ofmt->long_name;
@@ -1250,10 +1212,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
            }
        }
        while ((ifmt = av_iformat_next(ifmt))) {
            is_dev = is_device(ifmt->priv_class);
            if (!is_dev && device_only)
                continue;
            if ((!name || strcmp(ifmt->name, name) < 0) &&
            if ((name == NULL || strcmp(ifmt->name, name) < 0) &&
                strcmp(ifmt->name, last_name) > 0) {
                name      = ifmt->name;
                long_name = ifmt->long_name;
@@ -1262,7 +1221,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
            if (name && strcmp(ifmt->name, name) == 0)
                decode = 1;
        }
        if (!name)
        if (name == NULL)
            break;
        last_name = name;

@@ -1275,16 +1234,6 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
    return 0;
}

int show_formats(void *optctx, const char *opt, const char *arg)
{
    return show_formats_devices(optctx, opt, arg, 0);
}

int show_devices(void *optctx, const char *opt, const char *arg)
{
    return show_formats_devices(optctx, opt, arg, 1);
}

#define PRINT_CODEC_SUPPORTED(codec, field, type, list_name, term, get_name) \
    if (codec->field) {                                                      \
        const type *p = codec->field;                                        \
@@ -1429,9 +1378,6 @@ int show_codecs(void *optctx, const char *opt, const char *arg)
        const AVCodecDescriptor *desc = codecs[i];
        const AVCodec *codec = NULL;

        if (strstr(desc->name, "_deprecated"))
            continue;

        printf(" ");
        printf(avcodec_find_decoder(desc->id) ? "D" : ".");
        printf(avcodec_find_encoder(desc->id) ? "E" : ".");
@@ -1639,19 +1585,19 @@ int show_layouts(void *optctx, const char *opt, const char *arg)
    const char *name, *descr;

    printf("Individual channels:\n"
           "NAME           DESCRIPTION\n");
           "NAME        DESCRIPTION\n");
    for (i = 0; i < 63; i++) {
        name = av_get_channel_name((uint64_t)1 << i);
        if (!name)
            continue;
        descr = av_get_channel_description((uint64_t)1 << i);
        printf("%-14s %s\n", name, descr);
        printf("%-12s%s\n", name, descr);
    }
    printf("\nStandard channel layouts:\n"
           "NAME           DECOMPOSITION\n");
           "NAME        DECOMPOSITION\n");
    for (i = 0; !av_get_standard_channel_layout(i, &layout, &name); i++) {
        if (name) {
            printf("%-14s ", name);
            printf("%-12s", name);
            for (j = 1; j; j <<= 1)
                if ((layout & j))
                    printf("%s%s", (layout & (j - 1)) ? "+" : "", av_get_channel_name(j));
@@ -1857,7 +1803,7 @@ int read_yesno(void)

int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
{
    int64_t ret;
    int ret;
    FILE *f = av_fopen_utf8(filename, "rb");

    if (!f) {
@@ -1865,31 +1811,19 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
               strerror(errno));
        return AVERROR(errno);
    }

    ret = fseek(f, 0, SEEK_END);
    if (ret == -1) {
        ret = AVERROR(errno);
        goto out;
    fseek(f, 0, SEEK_END);
    *size = ftell(f);
    fseek(f, 0, SEEK_SET);
    if (*size == (size_t)-1) {
        av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", strerror(errno));
        fclose(f);
        return AVERROR(errno);
    }

    ret = ftell(f);
    if (ret < 0) {
        ret = AVERROR(errno);
        goto out;
    }
    *size = ret;

    ret = fseek(f, 0, SEEK_SET);
    if (ret == -1) {
        ret = AVERROR(errno);
        goto out;
    }

    *bufptr = av_malloc(*size + 1);
    if (!*bufptr) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n");
        ret = AVERROR(ENOMEM);
        goto out;
        fclose(f);
        return AVERROR(ENOMEM);
    }
    ret = fread(*bufptr, 1, *size, f);
    if (ret < *size) {
@@ -1905,8 +1839,6 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
        (*bufptr)[(*size)++] = '\0';
    }

out:
    av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", av_err2str(ret));
    fclose(f);
    return ret;
}
@@ -2010,8 +1942,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
            }

        if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
            !codec ||
            (codec->priv_class &&
            (codec && codec->priv_class &&
             av_opt_find(&codec->priv_class, t->key, NULL, flags,
                         AV_OPT_SEARCH_FAKE_OBJ)))
            av_dict_set(&ret, t->key, t->value, 0);
@@ -2034,7 +1965,7 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,

    if (!s->nb_streams)
        return NULL;
    opts = av_mallocz_array(s->nb_streams, sizeof(*opts));
    opts = av_mallocz(s->nb_streams * sizeof(*opts));
    if (!opts) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not alloc memory for stream options.\n");

cmdutils.h (14 changed lines)

@@ -24,13 +24,12 @@

#include <stdint.h>

#include "config.h"
#include "libavcodec/avcodec.h"
#include "libavfilter/avfilter.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"

#ifdef _WIN32
#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

@@ -59,7 +58,7 @@ void register_exit(void (*cb)(int ret));
/**
 * Wraps exit with a program-specific cleanup routine.
 */
void exit_program(int ret) av_noreturn;
void exit_program(int ret);

/**
 * Initialize the cmdutils option system, in particular
@@ -431,17 +430,10 @@ int show_license(void *optctx, const char *opt, const char *arg);

/**
 * Print a listing containing all the formats supported by the
 * program (including devices).
 * This option processing function does not utilize the arguments.
 */
int show_formats(void *optctx, const char *opt, const char *arg);

/**
 * Print a listing containing all the devices supported by the
 * program.
 * This option processing function does not utilize the arguments.
 */
int show_devices(void *optctx, const char *opt, const char *arg);
int show_formats(void *optctx, const char *opt, const char *arg);

/**
 * Print a listing containing all the codecs supported by the

@@ -6,7 +6,6 @@
    { "version"    , OPT_EXIT, {.func_arg = show_version},      "show version" },
    { "buildconf"  , OPT_EXIT, {.func_arg = show_buildconf},    "show build configuration" },
    { "formats"    , OPT_EXIT, {.func_arg = show_formats  },    "show available formats" },
    { "devices"    , OPT_EXIT, {.func_arg = show_devices  },    "show available devices" },
    { "codecs"     , OPT_EXIT, {.func_arg = show_codecs   },    "show available codecs" },
    { "decoders"   , OPT_EXIT, {.func_arg = show_decoders },    "show available decoders" },
    { "encoders"   , OPT_EXIT, {.func_arg = show_encoders },    "show available encoders" },

@@ -181,12 +181,12 @@ static int64_t run_opencl_bench(AVOpenCLExternalEnv *ext_opencl_env)
    OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_int), &width);
    OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_int), &height);

    start = av_gettime_relative();
    start = av_gettime();
    for (i = 0; i < OPENCL_NB_ITER; i++)
        OCLCHECK(clEnqueueNDRangeKernel, ext_opencl_env->command_queue, kernel, 2, NULL,
                 global_work_size_2d, local_work_size_2d, 0, NULL, NULL);
    clFinish(ext_opencl_env->command_queue);
    ret = (av_gettime_relative() - start)/OPENCL_NB_ITER;
    ret = (av_gettime() - start)/OPENCL_NB_ITER;
end:
    if (kernel)
        clReleaseKernel(kernel);
@@ -224,7 +224,7 @@ int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
        av_log(NULL, AV_LOG_ERROR, "No OpenCL device detected!\n");
        return AVERROR(EINVAL);
    }
    if (!(devices = av_malloc_array(nb_devices, sizeof(OpenCLDeviceBenchmark)))) {
    if (!(devices = av_malloc(sizeof(OpenCLDeviceBenchmark) * nb_devices))) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate buffer\n");
        return AVERROR(ENOMEM);
    }

@@ -51,9 +51,6 @@ COMPILE_HOSTC = $(call COMPILE,HOSTCC)
%.o: %.cpp
	$(COMPILE_CXX)

%.o: %.m
	$(COMPILE_C)

%.s: %.c
	$(CC) $(CPPFLAGS) $(CFLAGS) -S -o $@ $<

@@ -93,7 +90,7 @@ include $(SRC_PATH)/arch.mak

OBJS      += $(OBJS-yes)
SLIBOBJS  += $(SLIBOBJS-yes)
FFLIBS    := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS)
FFLIBS    := $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)

LDLIBS       = $(FFLIBS:%=%$(BUILDSUF))

@@ -805,7 +805,7 @@ struct AVS_Library {

AVSC_INLINE AVS_Library * avs_load_library() {
  AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
  if (!library)
  if (library == NULL)
    return NULL;
  library->handle = LoadLibrary("avisynth");
  if (library->handle == NULL)
@@ -870,7 +870,7 @@ fail:
}

AVSC_INLINE void avs_free_library(AVS_Library *library) {
  if (!library)
  if (library == NULL)
    return;
  FreeLibrary(library->handle);
  free(library);

@@ -1,35 +0,0 @@
/*
 * Work around broken floating point limits on some systems.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include_next <float.h>

#ifdef FLT_MAX
#undef  FLT_MAX
#define FLT_MAX 3.40282346638528859812e+38F

#undef  FLT_MIN
#define FLT_MIN 1.17549435082228750797e-38F

#undef  DBL_MAX
#define DBL_MAX ((double)1.79769313486231570815e+308L)

#undef  DBL_MIN
#define DBL_MIN ((double)2.22507385850720138309e-308L)
#endif

@@ -1,22 +0,0 @@
/*
 * Work around broken floating point limits on some systems.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include_next <limits.h>
#include <float.h>
@@ -54,7 +54,7 @@ static int getopt(int argc, char *argv[], char *opts)
        }
    }
    optopt = c = argv[optind][sp];
    if (c == ':' || !(cp = strchr(opts, c))) {
    if (c == ':' || (cp = strchr(opts, c)) == NULL) {
        fprintf(stderr, ": illegal option -- %c\n", c);
        if (argv[optind][++sp] == '\0') {
            optind++;

@@ -39,7 +39,6 @@
#include <windows.h>
#include <process.h>

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
@@ -74,29 +73,17 @@ static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
#define cond_broadcast WakeAllConditionVariable
#define cond_signal    WakeConditionVariable
#define cond_wait      SleepConditionVariableCS

#define CreateEvent(a, reset, init, name)                   \
    CreateEventEx(a, name,                                  \
                  (reset ? CREATE_EVENT_MANUAL_RESET : 0) | \
                  (init ? CREATE_EVENT_INITIAL_SET : 0),    \
                  EVENT_ALL_ACCESS)
// CreateSemaphoreExA seems to be desktop-only, but as long as we don't
// use named semaphores, it doesn't matter if we use the W version.
#define CreateSemaphore(a, b, c, d) \
    CreateSemaphoreExW(a, b, c, d, 0, SEMAPHORE_ALL_ACCESS)
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
#endif

static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
static unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
{
    pthread_t *h = arg;
    h->ret = h->func(h->arg);
    return 0;
}

static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
                                    void *(*start_routine)(void*), void *arg)
static int pthread_create(pthread_t *thread, const void *unused_attr,
                          void *(*start_routine)(void*), void *arg)
{
    thread->func   = start_routine;
    thread->arg    = arg;
@@ -105,7 +92,7 @@ static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
    return !thread->handle;
}

static av_unused void pthread_join(pthread_t thread, void **value_ptr)
static void pthread_join(pthread_t thread, void **value_ptr)
{
    DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
    if (ret != WAIT_OBJECT_0)
@@ -147,32 +134,31 @@ typedef struct  win32_cond_t {
    volatile int is_broadcast;
} win32_cond_t;

static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
static void pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
{
    win32_cond_t *win32_cond = NULL;
    if (cond_init) {
        cond_init(cond);
        return 0;
        return;
    }

    /* non native condition variables */
    win32_cond = av_mallocz(sizeof(win32_cond_t));
    if (!win32_cond)
        return ENOMEM;
        return;
    cond->ptr = win32_cond;
    win32_cond->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL);
    if (!win32_cond->semaphore)
        return ENOMEM;
        return;
    win32_cond->waiters_done = CreateEvent(NULL, TRUE, FALSE, NULL);
    if (!win32_cond->waiters_done)
        return ENOMEM;
        return;

    pthread_mutex_init(&win32_cond->mtx_waiter_count, NULL);
    pthread_mutex_init(&win32_cond->mtx_broadcast, NULL);
    return 0;
}

static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
static void pthread_cond_destroy(pthread_cond_t *cond)
{
    win32_cond_t *win32_cond = cond->ptr;
    /* native condition variables do not destroy */
@@ -188,7 +174,7 @@ static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
    cond->ptr = NULL;
}

static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
static void pthread_cond_broadcast(pthread_cond_t *cond)
{
    win32_cond_t *win32_cond = cond->ptr;
    int have_waiter;
@@ -219,7 +205,7 @@ static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
    pthread_mutex_unlock(&win32_cond->mtx_broadcast);
}

static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
static int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    win32_cond_t *win32_cond = cond->ptr;
    int last_waiter;
@@ -251,7 +237,7 @@ static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mu
    return pthread_mutex_lock(mutex);
}

static av_unused void pthread_cond_signal(pthread_cond_t *cond)
static void pthread_cond_signal(pthread_cond_t *cond)
{
    win32_cond_t *win32_cond = cond->ptr;
    int have_waiter;
@@ -276,7 +262,7 @@ static av_unused void pthread_cond_signal(pthread_cond_t *cond)
    pthread_mutex_unlock(&win32_cond->mtx_broadcast);
}

static av_unused void w32thread_init(void)
static void w32thread_init(void)
{
#if _WIN32_WINNT < 0x0600
    HANDLE kernel_dll = GetModuleHandle(TEXT("kernel32.dll"));

doc/APIchanges (416 changed lines)

@@ -2,347 +2,46 @@ Never assume the API of libav* to be stable unless at least 1 month has passed
since the last major version increase or the API was added.

The last version increases were:
libavcodec:    2014-08-09
libavdevice:   2014-08-09
libavfilter:   2014-08-09
libavformat:   2014-08-09
libavresample: 2014-08-09
libpostproc:   2014-08-09
libswresample: 2014-08-09
libswscale:    2014-08-09
libavutil:     2014-08-09
libavcodec:    2013-03-xx
libavdevice:   2013-03-xx
libavfilter:   2013-12-xx
libavformat:   2013-03-xx
libavresample: 2012-10-05
libpostproc:   2011-04-18
libswresample: 2011-09-19
libswscale:    2011-06-20
libavutil:     2012-10-22


API changes, most recent first:


-------- 8< --------- FFmpeg 2.4 was cut here -------- 8< ---------

2014-08-28 - f30a815 / 9301486 - lavc 56.1.100 / 56.1.0 - avcodec.h
  Add AV_PKT_DATA_STEREO3D to export container-level stereo3d information.

2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
  Add AVFormatContext.max_ts_probe.

2014-08-23 - 8fc9bd0 - lavu 54.7.100 - dict.h
  AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL arguments are now
  freed even on error. This is consistent with the behaviour all users
  of it we could find expect.
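
  (Editorial sketch, not part of APIchanges: with these flags the dictionary
  takes ownership of the heap string, and after this change it is freed even
  when av_dict_set() fails. The "probesize" key is only illustrative.)

      char *val = av_strdup("51200");                 /* heap string, assumed non-NULL */
      AVDictionary *opts = NULL;
      int ret = av_dict_set(&opts, "probesize", val, AV_DICT_DONT_STRDUP_VAL);
      /* do not free val here: on success the dictionary owns it, and since
       * lavu 54.7.100 it is also freed on error, so freeing it again would
       * be a double free */
      av_dict_free(&opts);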

2014-08-21 - 980a5b0 - lavu 54.6.100 - frame.h motion_vector.h
  Add AV_FRAME_DATA_MOTION_VECTORS side data and AVMotionVector structure

2014-08-16 - b7d5e01 - lswr 1.1.100 - swresample.h
  Add AVFrame based API

2014-08-16 - c2829dc - lavu 54.4.100 - dict.h
  Add av_dict_set_int helper function.

2014-08-13 - c8571c6 / 8ddc326 - lavu 54.3.100 / 54.3.0 - mem.h
  Add av_strndup().

2014-08-13 - 2ba4577 / a8c104a - lavu 54.2.100 / 54.2.0 - opt.h
  Add av_opt_get_dict_val/set_dict_val with AV_OPT_TYPE_DICT to support
  dictionary types being set as options.

2014-08-13 - afbd4b8 - lavf 56.01.0 - avformat.h
  Add AVFormatContext.event_flags and AVStream.event_flags for signaling to
  the user when events happen in the file/stream.

2014-08-10 - 78eaaa8 / fb1ddcd - lavr 2.1.0 - avresample.h
  Add avresample_convert_frame() and avresample_config().

2014-08-10 - 78eaaa8 / fb1ddcd - lavu 54.1.100 / 54.1.0 - error.h
  Add AVERROR_INPUT_CHANGED and AVERROR_OUTPUT_CHANGED.

2014-08-08 - 3841f2a / d35b94f - lavc 55.73.102 / 55.57.4 - avcodec.h
  Deprecate FF_IDCT_XVIDMMX define and xvidmmx idct option.
  Replaced by FF_IDCT_XVID and xvid respectively.

2014-08-08 - 5c3c671 - lavf 55.53.100 - avio.h
  Add avio_feof() and deprecate url_feof().

2014-08-07 - bb78903 - lsws 2.1.3 - swscale.h
  sws_getContext is not going to be removed in the future.

2014-08-07 - a561662 / ad1ee5f - lavc 55.73.101 / 55.57.3 - avcodec.h
  reordered_opaque is not going to be removed in the future.

2014-08-02 - 28a2107 - lavu 52.98.100 - pixelutils.h
  Add pixelutils API with SAD functions

2014-08-04 - 6017c98 / e9abafc - lavu 52.97.100 / 53.22.0 - pixfmt.h
  Add AV_PIX_FMT_YA16 pixel format for 16 bit packed gray with alpha.

2014-08-04 - 4c8bc6f / e96c3b8 - lavu 52.96.101 / 53.21.1 - avstring.h
  Rename AV_PIX_FMT_Y400A to AV_PIX_FMT_YA8 to better identify the format.
  An alias pixel format and color space name are provided for compatibility.

2014-08-04 - 073c074 / d2962e9 - lavu 52.96.100 / 53.21.0 - pixdesc.h
  Support name aliases for pixel formats.

2014-08-03 - 71d008e / 1ef9e83 - lavc 55.72.101 / 55.57.2 - avcodec.h
2014-08-03 - 71d008e / 1ef9e83 - lavu 52.95.100 / 53.20.0 - frame.h
  Deprecate AVCodecContext.dtg_active_format and use side-data instead.

2014-08-03 - e680c73 - lavc 55.72.100 - avcodec.h
  Add get_pixels() to AVDCT

2014-08-03 - 9400603 / 9f17685 - lavc 55.71.101 / 55.57.1 - avcodec.h
  Deprecate unused FF_IDCT_IPP define and ipp avcodec option.
  Deprecate unused FF_DEBUG_PTS define and pts avcodec option.
  Deprecate unused FF_CODER_TYPE_DEFLATE define and deflate avcodec option.
  Deprecate unused FF_DCT_INT define and int avcodec option.
  Deprecate unused avcodec option scenechange_factor.

2014-07-30 - ba3e331 - lavu 52.94.100 - frame.h
  Add av_frame_side_data_name()

2014-07-29 - 80a3a66 / 3a19405 - lavf 56.01.100 / 56.01.0 - avformat.h
  Add mime_type field to AVProbeData, which now MUST be initialized in
  order to avoid uninitialized reads of the mime_type pointer, likely
  leading to crashes.
  Typically, this means you will do 'AVProbeData pd = { 0 };' instead of
  'AVProbeData pd;'.

2014-07-29 - 31e0b5d / 69e7336 - lavu 52.92.100 / 53.19.0 - avstring.h
  Make name matching function from lavf public as av_match_name().

2014-07-28 - 2e5c8b0 / c5fca01 - lavc 55.71.100 / 55.57.0 - avcodec.h
  Add AV_CODEC_PROP_REORDER to mark codecs supporting frame reordering.

2014-07-27 - ff9a154 - lavf 55.50.100 - avformat.h
  New field int64_t probesize2 instead of deprecated
  field int probesize.

2014-07-27 - 932ff70 - lavc 55.70.100 - avdct.h
  Add AVDCT / avcodec_dct_alloc() / avcodec_dct_init().

2014-07-23 - 8a4c086 - lavf 55.49.100 - avio.h
  Add avio_read_to_bprint()


-------- 8< --------- FFmpeg 2.3 was cut here -------- 8< ---------

2014-07-14 - 62227a7 - lavf 55.47.100 - avformat.h
  Add av_stream_get_parser()

2014-07-09 - c67690f / a54f03b - lavu 52.92.100 / 53.18.0 - display.h
  Add av_display_matrix_flip() to flip the transformation matrix.

2014-07-09 - 1b58f13 / f6ee61f - lavc 55.69.100 / 55.56.0 - dv_profile.h
  Add a public API for DV profile handling.

2014-06-20 - 0dceefc / 9e500ef - lavu 52.90.100 / 53.17.0 - imgutils.h
  Add av_image_check_sar().

2014-06-20 - 4a99333 / 874390e - lavc 55.68.100 / 55.55.0 - avcodec.h
  Add av_packet_rescale_ts() to simplify timestamp conversion.
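
  (Editorial sketch, not part of APIchanges: typical use of the new helper
  when handing an encoded packet to the muxer; enc_ctx, out_stream and
  out_fmt_ctx are assumed to exist in the caller.)

      /* convert pkt timestamps from the encoder time base to the stream time base */
      av_packet_rescale_ts(&pkt, enc_ctx->time_base, out_stream->time_base);
      pkt.stream_index = out_stream->index;
      av_interleaved_write_frame(out_fmt_ctx, &pkt);   /* return value checking omitted */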
 | 
			
		||||
 | 
			
		||||
2014-06-18 - ac293b6 / 194be1f - lavf 55.44.100 / 55.20.0 - avformat.h
 | 
			
		||||
  The proper way for providing a hint about the desired timebase to the muxers
 | 
			
		||||
  is now setting AVStream.time_base, instead of AVStream.codec.time_base as was
 | 
			
		||||
  done previously. The old method is now deprecated.
 | 
			
		||||
 | 
			
		||||
2014-06-11 - 67d29da - lavc 55.66.101 - avcodec.h
  Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
  it

2014-06-10 - 5482780 - lavf 55.43.100 - avformat.h
  New field int64_t max_analyze_duration2 instead of deprecated
  int max_analyze_duration.

2014-05-30 - 00759d7 - lavu 52.89.100 - opt.h
  Add av_opt_copy()

2014-06-01 - 03bb99a / 0957b27 - lavc 55.66.100 / 55.54.0 - avcodec.h
  Add AVCodecContext.side_data_only_packets to allow encoders to output packets
  with only side data. This option may become mandatory in the future, so all
  users are recommended to update their code and enable this option.

2014-06-01 - 6e8e9f1 / 8c02adc - lavu 52.88.100 / 53.16.0 - frame.h, pixfmt.h
  Move all color-related enums (AVColorPrimaries, AVColorSpace, AVColorRange,
  AVColorTransferCharacteristic, and AVChromaLocation) inside lavu.
  And add AVFrame fields for them.

2014-05-29 - bdb2e80 / b2d4565 - lavr 1.3.0 - avresample.h
  Add avresample_max_output_samples

2014-05-28 - d858ee7 / 6d21259 - lavf 55.42.100 / 55.19.0 - avformat.h
  Add strict_std_compliance and related AVOptions to support experimental
  muxing.

2014-05-26 - 55cc60c - lavu 52.87.100 - threadmessage.h
  Add thread message queue API.

2014-05-26 - c37d179 - lavf 55.41.100 - avformat.h
  Add format_probesize to AVFormatContext.

2014-05-20 - 7d25af1 / c23c96b - lavf 55.39.100 / 55.18.0 - avformat.h
  Add av_stream_get_side_data() to access stream-level side data
  in the same way as av_packet_get_side_data().

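  For instance, a rotation exported by some demuxers as a display matrix can
  be read like this (st is a stream of an opened input; the side data may
  simply be absent):

      #include <libavformat/avformat.h>
      #include <libavutil/display.h>

      /* Return the stream's rotation in degrees, or 0.0 if none is exported. */
      static double stream_rotation(AVStream *st)
      {
          int size = 0;
          uint8_t *sd = av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size);
          if (sd && size >= 9 * (int)sizeof(int32_t))
              return av_display_rotation_get((const int32_t *)sd);
          return 0.0;
      }
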
2014-05-20 - 7336e39 - lavu 52.86.100 - fifo.h
  Add av_fifo_alloc_array() function.

2014-05-19 - ef1d4ee / bddd8cb - lavu 52.85.100 / 53.15.0 - frame.h, display.h
  Add AV_FRAME_DATA_DISPLAYMATRIX for exporting frame-level
  spatial rendering on video frames for proper display.

2014-05-19 - ef1d4ee / bddd8cb - lavc 55.64.100 / 55.53.0 - avcodec.h
  Add AV_PKT_DATA_DISPLAYMATRIX for exporting packet-level
  spatial rendering on video frames for proper display.

2014-05-19 - 999a99c / a312f71 - lavf 55.38.101 / 55.17.1 - avformat.h
  Deprecate AVStream.pts and the AVFrac struct, which was its only use case.
  See use av_stream_get_end_pts()

2014-05-18 - 68c0518 / fd05602 - lavc 55.63.100 / 55.52.0 - avcodec.h
  Add avcodec_free_context(). From now on it should be used for freeing
  AVCodecContext.

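  A sketch of the intended pattern (codec is a hypothetical AVCodec, e.g. the
  result of avcodec_find_decoder()):

      AVCodecContext *ctx = avcodec_alloc_context3(codec);
      if (!ctx)
          return AVERROR(ENOMEM);
      /* ... avcodec_open2(), decode or encode, avcodec_close() ... */
      avcodec_free_context(&ctx);  /* frees extradata etc. and sets ctx to NULL */
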
2014-05-17 - 0eec06e - lavu 52.84.100 - time.h
  Add av_gettime_relative() av_gettime_relative_is_monotonic()

2014-05-15 - eacf7d6 / 0c1959b - lavf 55.38.100 / 55.17.0 - avformat.h
  Add AVFMT_FLAG_BITEXACT flag. Muxers now use it instead of checking
  CODEC_FLAG_BITEXACT on the first stream.

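  A one-line sketch for muxing code that previously set CODEC_FLAG_BITEXACT on
  the first stream (oc is a hypothetical output context, set up before
  avformat_write_header()):

      oc->flags |= AVFMT_FLAG_BITEXACT;  /* request bit-exact muxer output */
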
2014-05-15 - 96cb4c8 - lswr 0.19.100 - swresample.h
  Add swr_close()

2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
  Add AV_PIX_FMT_VDA for new-style VDA acceleration.

2014-05-07 - 351f611 - lavu 52.82.100 - fifo.h
  Add av_fifo_freep() function.

2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
  Add av_opt_set_dict2() function.

2014-05-01 - e77b985 / a2941c8 - lavc 55.60.103 / 55.50.3 - avcodec.h
  Deprecate CODEC_FLAG_MV0. It is replaced by the flag "mv0" in the
  "mpv_flags" private option of the mpegvideo encoders.

2014-05-01 - e40ae8c / 6484149 - lavc 55.60.102 / 55.50.2 - avcodec.h
  Deprecate CODEC_FLAG_GMC. It is replaced by the "gmc" private option of the
  libxvid encoder.

2014-05-01 - 1851643 / b2c3171 - lavc 55.60.101 / 55.50.1 - avcodec.h
  Deprecate CODEC_FLAG_NORMALIZE_AQP. It is replaced by the flag "naq" in the
  "mpv_flags" private option of the mpegvideo encoders.

2014-05-01 - cac07d0 / 5fcceda - avcodec.h
  Deprecate CODEC_FLAG_INPUT_PRESERVED. Its functionality is replaced by passing
  reference-counted frames to encoders.

2014-04-30 - 617e866 - lavu 52.81.100 - pixdesc.h
  Add av_find_best_pix_fmt_of_2(), av_get_pix_fmt_loss()
  Deprecate avcodec_get_pix_fmt_loss(), avcodec_find_best_pix_fmt_of_2()

2014-04-29 - 1bf6396 - lavc 55.60.100 - avcodec.h
  Add AVCodecDescriptor.mime_types field.

2014-04-29 - b804eb4 - lavu 52.80.100 - hash.h
  Add av_hash_final_bin(), av_hash_final_hex() and av_hash_final_b64().

2014-03-07 - 8b2a130 - lavc 55.50.0 / 55.53.100 - dxva2.h
  Add FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO for old Intel GPUs.

2014-04-22 - 502512e /dac7e8a - lavu 53.13.0 / 52.78.100 - avutil.h
  Add av_get_time_base_q().

2014-04-17 - a8d01a7 / 0983d48 - lavu 53.12.0 / 52.77.100 - crc.h
  Add AV_CRC_16_ANSI_LE crc variant.

2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
  Add av_format_inject_global_side_data()

2014-04-12 - 4f698be - lavu 52.76.100 - log.h
  Add av_log_get_flags()

2014-04-11 - 6db42a2b - lavd 55.12.100 - avdevice.h
  Add avdevice_capabilities_create() function.
  Add avdevice_capabilities_free() function.

2014-04-07 - 0a1cc04 / 8b17243 - lavu 52.75.100 / 53.11.0 - pixfmt.h
  Add AV_PIX_FMT_YVYU422 pixel format.

2014-04-04 - c1d0536 / 8542f9c - lavu 52.74.100 / 53.10.0 - replaygain.h
  Full scale for peak values is now 100000 (instead of UINT32_MAX) and values
  may overflow.

2014-04-03 - c16e006 / 7763118 - lavu 52.73.100 / 53.9.0 - log.h
  Add AV_LOG(c) macro to have 256 color debug messages.

2014-04-03 - eaed4da9 - lavu 52.72.100 - opt.h
  Add AV_OPT_MULTI_COMPONENT_RANGE define to allow return
  multi-component option ranges.

2014-03-29 - cd50a44b - lavu 52.70.100 - mem.h
  Add av_dynarray_add_nofree() function.

2014-02-24 - 3e1f241 / d161ae0 - lavu 52.69.100 / 53.8.0 - frame.h
  Add av_frame_remove_side_data() for removing a single side data
  instance from a frame.

2014-03-24 - 83e8978 / 5a7e35d - lavu 52.68.100 / 53.7.0 - frame.h, replaygain.h
  Add AV_FRAME_DATA_REPLAYGAIN for exporting replaygain tags.
  Add a new header replaygain.h with the AVReplayGain struct.

2014-03-24 - 83e8978 / 5a7e35d - lavc 55.54.100 / 55.36.0 - avcodec.h
  Add AV_PKT_DATA_REPLAYGAIN for exporting replaygain tags.

2014-03-24 - 595ba3b / 25b3258 - lavf 55.35.100 / 55.13.0 - avformat.h
  Add AVStream.side_data and AVStream.nb_side_data for exporting stream-global
  side data (e.g. replaygain tags, video rotation)

2014-03-24 - bd34e26 / 0e2c3ee - lavc 55.53.100 / 55.35.0 - avcodec.h
  Give the name AVPacketSideData to the previously anonymous struct used for
  AVPacket.side_data.


-------- 8< --------- FFmpeg 2.2 was cut here -------- 8< ---------

2014-03-18 - 37c07d4 - lsws 2.5.102
2014-03-18 - e9c8a9a - lsws 2.5.102
  Make gray16 full-scale.

2014-03-16 - 6b1ca17 / 1481d24 - lavu 52.67.100 / 53.6.0 - pixfmt.h
  Add RGBA64_LIBAV pixel format and variants for compatibility

2014-03-11 - 3f3229c - lavf 55.34.101 - avformat.h
  Set AVFormatContext.start_time_realtime when demuxing.

2014-03-03 - 06fed440 - lavd 55.11.100 - avdevice.h
  Add av_input_audio_device_next().
  Add av_input_video_device_next().
  Add av_output_audio_device_next().
  Add av_output_video_device_next().

2014-02-24 - fff5262 / 1155fd0 - lavu 52.66.100 / 53.5.0 - frame.h
2014-xx-xx - xxxxxxx - lavu 53.05.0 - frame.h
  Add av_frame_copy() for copying the frame data.

2014-02-24 - a66be60 - lswr 0.18.100 - swresample.h
2014-02-xx - xxxxxxx - lswr 0.18.100 - swresample.h
  Add swr_is_initialized() for checking whether a resample context is initialized.

2014-02-22 - 5367c0b / 7e86c27 - lavr 1.2.0 - avresample.h
2014-02-xx - xxxxxxx - lavr 1.2.0 - avresample.h
  Add avresample_is_open() for checking whether a resample context is open.

2014-02-19 - 6a24d77 / c3ecd96 - lavu 52.65.100 / 53.4.0  - opt.h
2014-xx-xx - xxxxxxx - lavu 53.04.0  - opt.h
  Add AV_OPT_FLAG_EXPORT and AV_OPT_FLAG_READONLY to mark options meant (only)
  for reading.

2014-02-19 - f4c8d00 / 6bb8720 - lavu 52.64.101 / 53.3.1 - opt.h
2014-xx-xx - xxxxxxx - lavu 53.03.01 - opt.h
  Deprecate unused AV_OPT_FLAG_METADATA.

2014-02-16 - 81c3f81 - lavd 55.10.100 - avdevice.h
2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
  Add avdevice_list_devices() and avdevice_free_list_devices()

2014-02-16 - db3c970 - lavf 55.33.100 - avio.h
  Add avio_find_protocol_name() to find out the name of the protocol that would
  be selected for a given URL.

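  For instance (the URL is purely illustrative; the call returns NULL when no
  protocol handler matches):

      const char *proto = avio_find_protocol_name("https://example.com/live.m3u8");
      if (proto)
          printf("would use protocol: %s\n", proto);  /* e.g. "https" */
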
2014-02-15 - a2bc6c1 / c98f316 - lavu 52.64.100 / 53.3.0 - frame.h
2014-02-xx - xxxxxxx - lavu 53.3.0 - frame.h
  Add AV_FRAME_DATA_DOWNMIX_INFO value to the AVFrameSideDataType enum and
  downmix_info.h API, which identify downmix-related metadata.

@@ -353,7 +52,7 @@ API changes, most recent first:
  Add AVFormatContext.max_interleave_delta for controlling amount of buffering
  when interleaving.

2014-02-02 - 5871ee5 - lavf 55.29.100 - avformat.h
2014-02-02 - xxxxxxx - lavf 55.29.100 - avformat.h
  Add output_ts_offset muxing option to AVFormatContext.

2014-01-27 - 102bd64 - lavd 55.7.100 - avdevice.h
@@ -373,10 +72,10 @@ API changes, most recent first:
  (i.e. as if the CODEC_FLAG_EMU_EDGE flag was always on). Deprecate
  CODEC_FLAG_EMU_EDGE and avcodec_get_edge_width().

2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h
2014-01-19 - xxxxxxx - lavf 55.25.100 - avformat.h
  Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().

2014-01-19 - 3532dd5 - lavu 52.63.100 - rational.h
2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
  Add av_make_q() function.

2014-01-05 - 4cf4da9 / 5b4797a - lavu 52.62.100 / 53.2.0 - frame.h
@@ -386,16 +85,16 @@ API changes, most recent first:
2014-01-05 - 751385f / 5c437fb - lavu 52.61.100 / 53.1.0 - channel_layout.h
  Add values for various Dolby flags to the AVMatrixEncoding enum.

2014-01-04 - b317f94 - lavu 52.60.100 - mathematics.h
2014-01-04 - xxxxxxx - lavu 52.60.100 - mathematics.h
  Add av_add_stable() function.

2013-12-22 - 911676c - lavu 52.59.100 - avstring.h
2013-12-22 - xxxxxxx - lavu 52.59.100 - avstring.h
  Add av_strnlen() function.

2013-12-09 - 64f73ac - lavu 52.57.100 - opencl.h
2013-12-xx - xxxxxxx - lavu 52.57.100 - opencl.h
  Add av_opencl_benchmark() function.

2013-11-30 - 82b2e9c - lavu 52.56.100 - ffversion.h
2013-11-xx - xxxxxxx - lavu 52.56.100 - ffversion.h
  Moves version.h to libavutil/ffversion.h.
  Install ffversion.h and make it public.

@@ -412,13 +111,13 @@ API changes, most recent first:
  Add AV_FRAME_DATA_A53_CC value to the AVFrameSideDataType enum, which
  identifies ATSC A53 Part 4 Closed Captions data.

2013-11-22 - 6859065 - lavu 52.54.100 - avstring.h
2013-11-XX - xxxxxxx - lavu 52.54.100 - avstring.h
  Add av_utf8_decode() function.

2013-11-22 - fb7d70c - lavc 55.44.100 - avcodec.h
  Add HEVC profiles

2013-11-20 - c28b61c - lavc 55.44.100 - avcodec.h
2013-11-xx - xxxxxxx - lavc 55.44.100 - avcodec.h
  Add av_packet_{un,}pack_dictionary()
  Add AV_PKT_METADATA_UPDATE side data type, used to transmit key/value
  strings between a stream and the application.
@@ -430,7 +129,7 @@ API changes, most recent first:
  Deprecate AVCodecContext.error_rate, it is replaced by the 'error_rate'
  private option of the mpegvideo encoder family.

2013-11-14 - 31c09b7 / 728c465 - lavc 55.42.100 / 55.26.0 - vdpau.h
2013-11-14 - 31c09b7 / 728c465 - lavc 55.26.0 - vdpau.h
  Add av_vdpau_get_profile().
  Add av_vdpau_alloc_context(). This function must from now on be
  used for allocating AVVDPAUContext.
@@ -440,32 +139,29 @@ API changes, most recent first:
  Add ITU-R BT.2020 and other not yet included values to color primaries,
  transfer characteristics and colorspaces.

2013-11-04 - 85cabf1 - lavu 52.50.100 - avutil.h
2013-11-04 - xxxxxxx - lavu 52.50.100 - avutil.h
  Add av_fopen_utf8()

2013-10-31 - 78265fc / 28096e0 - lavu 52.49.100 / 52.17.0 - frame.h
  Add AVFrame.flags and AV_FRAME_FLAG_CORRUPT.


-------- 8< --------- FFmpeg 2.1 was cut here -------- 8< ---------

2013-10-27 - dbe6f9f - lavc 55.39.100 - avcodec.h
2013-10-27 - xxxxxxx - lavc 55.39.100 - avcodec.h
  Add CODEC_CAP_DELAY support to avcodec_decode_subtitle2.

2013-10-27 - d61617a - lavu 52.48.100 - parseutils.h
2013-10-27 - xxxxxxx - lavu 52.48.100 - parseutils.h
  Add av_get_known_color_name().

2013-10-17 - 8696e51 - lavu 52.47.100 - opt.h
2013-10-17 - xxxxxxx - lavu 52.47.100 - opt.h
  Add AV_OPT_TYPE_CHANNEL_LAYOUT and channel layout option handlers
  av_opt_get_channel_layout() and av_opt_set_channel_layout().

2013-10-06 - ccf96f8 -libswscale 2.5.101 - options.c
2013-10-xx - xxxxxxx -libswscale 2.5.101 - options.c
  Change default scaler to bicubic

2013-10-03 - e57dba0 - lavc 55.34.100 - avcodec.h
2013-10-03 - xxxxxxx - lavc 55.34.100 - avcodec.h
  Add av_codec_get_max_lowres()

2013-10-02 - 5082fcc - lavf 55.19.100 - avformat.h
2013-10-02 - xxxxxxx - lavf 55.19.100 - avformat.h
  Add audio/video/subtitle AVCodec fields to AVFormatContext to force specific
  decoders

@@ -483,7 +179,7 @@ API changes, most recent first:
2013-09-04 - 3e1f507 - lavc 55.31.101 - avcodec.h
  avcodec_close() argument can be NULL.

2013-09-04 - 36cd017a - lavf 55.16.101 - avformat.h
2013-09-04 - 36cd017 - lavf 55.16.101 - avformat.h
  avformat_close_input() argument can be NULL and point on NULL.

2013-08-29 - e31db62 - lavf 55.15.100 - avformat.h
@@ -492,10 +188,10 @@ API changes, most recent first:
2013-08-15 - 1e0e193 - lsws 2.5.100 -
  Add a sws_dither AVOption, allowing to set the dither algorithm used

2013-08-11 - d404fe35 - lavc 55.27.100 - vdpau.h
2013-08-xx - xxxxxxx - lavc 55.27.100 - vdpau.h
  Add a render2 alternative to the render callback function.

2013-08-11 - af05edc - lavc 55.26.100 - vdpau.h
2013-08-xx - xxxxxxx - lavc 55.26.100 - vdpau.h
  Add allocation function for AVVDPAUContext, allowing
  to extend it in the future without breaking ABI/API.

@@ -505,7 +201,7 @@ API changes, most recent first:

2013-08-05 - 9547e3e / f824535 - lavc 55.22.100 / 55.13.0 - avcodec.h
  Deprecate the bitstream-related members from struct AVVDPAUContext.
  The bitstream buffers no longer need to be explicitly freed.
  The bistream buffers no longer need to be explicitly freed.

2013-08-05 - 3b805dc / 549294f - lavc 55.21.100 / 55.12.0 - avcodec.h
  Deprecate the CODEC_CAP_HWACCEL_VDPAU codec capability. Use CODEC_CAP_HWACCEL
@@ -521,9 +217,6 @@ API changes, most recent first:
  Add avcodec_chroma_pos_to_enum()
  Add avcodec_enum_to_chroma_pos()


-------- 8< --------- FFmpeg 2.0 was cut here -------- 8< ---------

2013-07-03 - 838bd73 - lavfi 3.78.100 - avfilter.h
  Deprecate avfilter_graph_parse() in favor of the equivalent
  avfilter_graph_parse_ptr().
@@ -596,9 +289,6 @@ API changes, most recent first:
2013-03-17 - 7aa9af5 - lavu 52.20.100 - opt.h
  Add AV_OPT_TYPE_VIDEO_RATE value to AVOptionType enum.


-------- 8< --------- FFmpeg 1.2 was cut here -------- 8< ---------

2013-03-07 - 9767ec6 - lavu 52.18.100 - avstring.h,bprint.h
  Add av_escape() and av_bprint_escape() API.

@@ -611,9 +301,6 @@ API changes, most recent first:
2013-01-01 - 2eb2e17 - lavfi 3.34.100
  Add avfilter_get_audio_buffer_ref_from_arrays_channels.


-------- 8< --------- FFmpeg 1.1 was cut here -------- 8< ---------

2012-12-20 - 34de47aa - lavfi 3.29.100 - avfilter.h
  Add AVFilterLink.channels, avfilter_link_get_channels()
  and avfilter_ref_get_channels().
@@ -659,9 +346,6 @@ API changes, most recent first:
  Add LIBSWRESAMPLE_VERSION, LIBSWRESAMPLE_BUILD
  and LIBSWRESAMPLE_IDENT symbols.


-------- 8< --------- FFmpeg 1.0 was cut here -------- 8< ---------

2012-09-06 - 29e972f - lavu 51.72.100 - parseutils.h
  Add av_small_strptime() time parsing function.

@@ -1080,9 +764,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
2012-01-12 - b18e17e / 3167dc9 - lavfi 2.59.100 / 2.15.0
  Add a new installed header -- libavfilter/version.h -- with version macros.


-------- 8< --------- FFmpeg 0.9 was cut here -------- 8< ---------

2011-12-08 - a502939 - lavfi 2.52.0
  Add av_buffersink_poll_frame() to buffersink.h.

@@ -1111,9 +792,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
  Add avformat_close_input().
  Deprecate av_close_input_file() and av_close_input_stream().

2011-12-09 - c59b80c / b2890f5 - lavu 51.32.0 / 51.20.0 - audioconvert.h
  Expand the channel layout list.

2011-12-02 - e4de716 / 0eea212 - lavc 53.40.0 / 53.25.0
  Add nb_samples and extended_data fields to AVFrame.
  Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
@@ -1127,10 +805,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
  Change AVCodecContext.error[4] to [8] at next major bump.
  Add AV_NUM_DATA_POINTERS to simplify the bump transition.

2011-11-24 - lavu 51.29.0 / 51.19.0
  92afb43 / bd97b2e - add planar RGB pixel formats
  92afb43 / 6b0768e - add PIX_FMT_PLANAR and PIX_FMT_RGB pixel descriptions

2011-11-23 - 8e576d5 / bbb46f3 - lavu 51.27.0 / 51.18.0
  Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
  av_samples_alloc(), to samplefmt.h.
@@ -1292,13 +966,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
2011-06-28 - 5129336 - lavu 51.11.0 - avutil.h
  Define the AV_PICTURE_TYPE_NONE value in AVPictureType enum.


-------- 8< --------- FFmpeg 0.7 was cut here -------- 8< ---------


-------- 8< --------- FFmpeg 0.8 was cut here -------- 8< ---------

2011-06-19 - fd2c0a5 - lavfi 2.23.0 - avfilter.h
  Add layout negotiation fields and helper functions.

@@ -1976,9 +1643,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
2010-06-02 - 7e566bb - lavc 52.73.0 - av_get_codec_tag_string()
  Add av_get_codec_tag_string().


-------- 8< --------- FFmpeg 0.6 was cut here -------- 8< ---------

2010-06-01 - 2b99142 - lsws 0.11.0 - convertPalette API
  Add sws_convertPalette8ToPacked32() and sws_convertPalette8ToPacked24().

@@ -1996,6 +1660,10 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
2010-05-09 - b6bc205 - lavfi 1.20.0 - AVFilterPicRef
  Add interlaced and top_field_first fields to AVFilterPicRef.

------------------------------8<-------------------------------------
                   0.6 branch was cut here
----------------------------->8--------------------------------------

2010-05-01 - 8e2ee18 - lavf 52.62.0 - probe function
  Add av_probe_input_format2 to API, it allows ignoring probe
  results below given score and returns the actual probe score.

@@ -31,7 +31,7 @@ PROJECT_NAME           = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER         = 2.4.9
PROJECT_NUMBER         = 2.2.4

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -759,7 +759,7 @@ ALPHABETICAL_INDEX     = YES
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])

COLS_IN_ALPHA_INDEX    = 5
COLS_IN_ALPHA_INDEX    = 2

# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
@@ -1056,7 +1056,7 @@ FORMULA_TRANSPARENT    = YES
# typically be disabled. For large projects the javascript based search engine
# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.

SEARCHENGINE           = YES
SEARCHENGINE           = NO

# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a PHP enabled web server instead of at the web client
@@ -1359,8 +1359,6 @@ PREDEFINED             = "__attribute__(x)=" \
                         "DECLARE_ALIGNED(a,t,n)=t n" \
                         "offsetof(x,y)=0x42" \
                         av_alloc_size \
                         AV_GCC_VERSION_AT_LEAST(x,y)=1 \
                         __GNUC__=1 \

# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.

doc/Makefile | 21

@@ -38,9 +38,7 @@ DOCS = $(DOCS-yes)

DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE)      += avio_reading
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE)           += avcodec
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
DOC_EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE)       += extract_mvs
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE)      += filter_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE)   += filtering_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE)   += filtering_video
@@ -50,7 +48,6 @@ DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE)          += remuxing
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE)  += resampling_audio
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE)     += scaling_video
DOC_EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE)     += transcode_aac
DOC_EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE)       += transcoding
ALL_DOC_EXAMPLES_LIST = $(DOC_EXAMPLES-) $(DOC_EXAMPLES-yes)

DOC_EXAMPLES       := $(DOC_EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
@@ -82,25 +79,14 @@ $(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
	$(M)doc/print_options $* > $@

doc/%.html: TAG = HTML
doc/%-all.html: TAG = HTML

ifdef HAVE_MAKEINFO_HTML
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
	$(Q)$(TEXIDEP)
	$(M)makeinfo --html -I doc --no-split -D config-not-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<

doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
	$(Q)$(TEXIDEP)
	$(M)makeinfo --html -I doc --no-split -D config-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
else
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
	$(Q)$(TEXIDEP)
	$(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<

doc/%-all.html: TAG = HTML
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
	$(Q)$(TEXIDEP)
	$(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
endif

doc/%.pod: TAG = POD
doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
@@ -124,9 +110,8 @@ OBJDIRS += doc/examples

DOXY_INPUT      = $(addprefix $(SRC_PATH)/, $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c))

doc/doxy/html: TAG = DOXY
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT)
	$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $< $(DOXYGEN) $(DOXY_INPUT)
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(DOXY_INPUT)
	$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $< $(DOXY_INPUT)

install-doc: install-html install-man

doc/RELEASE_NOTES (new file) | 16

@@ -0,0 +1,16 @@
Release Notes
=============

* 2.2 "Muybridge"  March, 2014


General notes
-------------
See the Changelog file for a list of significant changes. Note, there
are many more new features and bugfixes than whats listed there.

Bugreports against FFmpeg git master or the most recent FFmpeg release are
accepted. If you are experiencing issues with any formally released version of
FFmpeg, please try git master to check if the issue still exists. If it does,
make your report against the development code following the usual bug reporting
guidelines.

@@ -74,18 +74,7 @@ format with @command{ffmpeg}, you can use the command:
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
@end example

@section imxdump

Modifies the bitstream to fit in MOV and to be usable by the Final Cut
Pro decoder. This filter only applies to the mpeg2video codec, and is
likely not needed for Final Cut Pro 7 and newer with the appropriate
@option{-tag:v}.

For example, to remux 30 MB/sec NTSC IMX to MOV:

@example
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
@end example
@section imx_dump_header

@section mjpeg2jpeg

@@ -132,13 +121,6 @@ ffmpeg -i frame_%d.jpg -c:v copy rotated.avi

@section noise

Damages the contents of packets without damaging the container. Can be
used for fuzzing or testing error resilience/concealment.

@example
ffmpeg -i INPUT -c copy -bsf noise output.mkv
@end example

@section remove_extra

@c man end BITSTREAM FILTERS

doc/bootstrap.min.css (vendored) | 5

File diff suppressed because one or more lines are too long

@@ -25,9 +25,6 @@ fate-list
install
    Install headers, libraries and programs.

examples
    Build all examples located in doc/examples.

libavformat/output-example
    Build the libavformat basic example.

@@ -37,9 +34,6 @@ libavcodec/api-example
libswscale/swscale-test
    Build the swscale self-test (useful also as example).

config
    Reconfigure the project with current configuration.


Useful standard make commands:
make -t <target>

@@ -285,11 +285,6 @@ detect bitstream specification deviations
detect improper bitstream length
@item explode
abort decoding on minor error detection
@item ignore_err
ignore decoding errors, and continue decoding.
This is useful if you want to analyze the content of a video and thus want
everything to be decoded no matter what. This option will not result in a video
that is pleasing to watch in case of errors.
@item careful
consider things that violate the spec and have not been seen in the wild as errors
@item compliant
@@ -394,9 +389,6 @@ Possible values:

@item simplemmx

@item simpleauto
Automatically pick a IDCT compatible with the simple one

@item arm

@item altivec
@@ -432,8 +424,6 @@ Possible values:
iterative motion vector (MV) search (slow)
@item deblock
use strong deblock filter for damaged MBs
@item favor_inter
favor predicting from the previous frame instead of the current
@end table

@item bits_per_coded_sample @var{integer}
@@ -498,8 +488,6 @@ threading operations
@item vismv @var{integer} (@emph{decoding,video})
Visualize motion vectors (MVs).

This option is deprecated, see the codecview filter instead.

Possible values:
@table @samp
@item pf
@@ -799,9 +787,6 @@ Frame data might be split into multiple chunks.
Show all frames before the first keyframe.
@item skiprd
Deprecated, use mpegvideo private options instead.
@item export_mvs
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
@end table

@item error @var{integer} (@emph{encoding,video})
@@ -894,7 +879,7 @@ Set frame skip factor.
Set frame skip exponent.
Negative values behave identical to the corresponding positive ones, except
that the score is normalized.
Positive values exist primarily for compatibility reasons and are not so useful.
Positive values exist primarly for compatibility reasons and are not so useful.

@item skipcmp @var{integer} (@emph{encoding,video})
Set frame skip compare function.
@@ -1040,26 +1025,15 @@ Set the log level offset.
Number of slices, used in parallelized encoding.

@item thread_type @var{flags} (@emph{decoding/encoding,video})
Select which multithreading methods to use.

Use of @samp{frame} will increase decoding delay by one frame per
thread, so clients which cannot provide future frames should not use
it.
Select multithreading type.

Possible values:
@table @samp
@item slice
Decode more than one part of a single frame at once.

Multithreading using slices works only when the video was encoded with
slices.

@item frame
Decode more than one frame at once.

@end table

Default value is @samp{slice+frame}.

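For example, a client that cannot supply future frames may restrict a decoder
to slice threading on a hypothetical @code{AVCodecContext} @var{dec_ctx},
before calling @code{avcodec_open2()}:

@example
dec_ctx->thread_count = 4;                /* number of worker threads */
dec_ctx->thread_type  = FF_THREAD_SLICE;  /* avoid frame-threading delay */
@end example
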
@item audio_service_type @var{integer} (@emph{encoding,audio})
Set audio service type.

@@ -163,9 +163,6 @@ Requires the presence of the libopus headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopus}.

An FFmpeg native decoder for Opus exists, so users can decode Opus
without this library.

@c man end AUDIO DECODERS

@chapter Subtitles Decoders

@@ -74,7 +74,7 @@ following directive is recognized:
Path to a file to read; special characters and spaces must be escaped with
backslash or single quotes.

All subsequent file-related directives apply to that file.
All subsequent directives apply to that file.

@item @code{ffconcat version 1.0}
Identify the script type and version. It also sets the @option{safe} option
@@ -92,22 +92,6 @@ file is not available or accurate.
If the duration is set for all files, then it is possible to seek in the
whole concatenated video.

@item @code{stream}
Introduce a stream in the virtual file.
All subsequent stream-related directives apply to the last introduced
stream.
Some streams properties must be set in order to allow identifying the
matching streams in the subfiles.
If no streams are defined in the script, the streams from the first file are
copied.

@item @code{exact_stream_id @var{id}}
Set the id of the stream.
If this directive is given, the string with the corresponding id in the
subfiles will be used.
This is especially useful for MPEG-PS (VOB) files, where the order of the
streams is not reliable.

@end table

@subsection Options
@@ -128,14 +112,6 @@ If set to 0, any file name is accepted.
The default is -1, it is equivalent to 1 if the format was automatically
probed and 0 otherwise.

@item auto_convert
If set to 1, try to perform automatic conversions on packet data to make the
streams concatenable.

Currently, the only conversion is adding the h264_mp4toannexb bitstream
filter to H.264 streams in MP4 format. This is necessary in particular if
there are resolution changes.

@end table

@section flv
@@ -174,40 +150,6 @@ See @url{http://quvi.sourceforge.net/} for more information.
FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be
enabled.

@section gif

Animated GIF demuxer.

It accepts the following options:

@table @option
@item min_delay
Set the minimum valid delay between frames in hundredths of seconds.
Range is 0 to 6000. Default value is 2.

@item default_delay
Set the default delay between frames in hundredths of seconds.
Range is 0 to 6000. Default value is 10.

@item ignore_loop
GIF files can contain information to loop a certain number of times (or
infinitely). If @option{ignore_loop} is set to 1, then the loop setting
from the input will be ignored and looping will not occur. If set to 0,
then looping will occur and will cycle the number of times according to
the GIF. Default value is 1.
@end table

For example, with the overlay filter, place an infinitely looping GIF
over another video:
@example
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
@end example

Note that in the above example the shortest option for overlay filter is
used to end the output video at the length of the shortest input file,
which in this case is @file{input.mp4} as the GIF in this example loops
infinitely.

@section image2

Image file demuxer.
@@ -307,8 +249,6 @@ is 5.
If set to 1, will set frame timestamp to modification time of image file. Note
that monotonity of timestamps is not provided: images go in the same order as
without this option. Default value is 0.
If set to 2, will set frame timestamp to the modification time of the image file in
nanosecond precision.
@item video_size
Set the video size of the images to read. If not specified the video
size is guessed from the first image file in the sequence.

@@ -323,12 +323,9 @@ Always fill out the commit log message. Describe in a few lines what you
changed and why. You can refer to mailing list postings if you fix a
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
Recommended format:

@example
area changed: Short 1 line description

details describing what and why and giving references.
@end example

@item
Make sure the author of the commit is set correctly. (see git commit --author)

@@ -2,11 +2,10 @@

SRC_PATH="${1}"
DOXYFILE="${2}"
DOXYGEN="${3}"

shift 3
shift 2

$DOXYGEN - <<EOF
doxygen - <<EOF
@INCLUDE        = ${DOXYFILE}
INPUT           = $@
EXAMPLE_PATH    = ${SRC_PATH}/doc/examples

@@ -80,7 +80,7 @@ thresholds with quantizer steps to find the appropriate quantization with
distortion below threshold band by band.

The quality of this method is comparable to the two loop searching method
described below, but somewhat a little better and slower.
descibed below, but somewhat a little better and slower.

@item anmr
Average noise to mask ratio (ANMR) trellis-based solution.
@@ -1032,7 +1032,7 @@ configuration. You need to explicitly configure the build with

@subsection Option Mapping

Most libopus options are modelled after the @command{opusenc} utility from
Most libopus options are modeled after the @command{opusenc} utility from
opus-tools. The following is an option mapping chart describing options
supported by the libopus wrapper, and their @command{opusenc}-equivalent
in parentheses.
@@ -1330,7 +1330,7 @@ ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg

@section libvpx

VP8/VP9 format supported through libvpx.
VP8 format supported through libvpx.

Requires the presence of the libvpx headers and library during configuration.
You need to explicitly configure the build with @code{--enable-libvpx}.
@@ -1442,9 +1442,6 @@ g_lag_in_frames
@item vp8flags error_resilient
g_error_resilient

@item aq_mode
@code{VP9E_SET_AQ_MODE}

@end table

For more information about libvpx see:
@@ -1569,34 +1566,25 @@ kilobits/s.

@item g (@emph{keyint})

@item qmin (@emph{qpmin})
Minimum quantizer scale.

@item qmax (@emph{qpmax})
Maximum quantizer scale.

@item qmin (@emph{qpmin})

@item qdiff (@emph{qpstep})
Maximum difference between quantizer scales.

@item qblur (@emph{qblur})
Quantizer curve blur

@item qcomp (@emph{qcomp})
Quantizer curve compression factor

@item refs (@emph{ref})
Number of reference frames each P-frame can use. The range is from @var{0-16}.

@item sc_threshold (@emph{scenecut})
Sets the threshold for the scene change detection.

@item trellis (@emph{trellis})
Performs Trellis quantization to increase efficiency. Enabled by default.

@item nr  (@emph{nr})

@item me_range (@emph{merange})
Maximum range of the motion search in pixels.

@item me_method (@emph{me})
Set motion estimation method. Possible values in the decreasing order
@@ -1618,13 +1606,10 @@ Hadamard exhaustive search (slowest).
@end table

@item subq (@emph{subme})
Sub-pixel motion estimation method.

@item b_strategy (@emph{b-adapt})
Adaptive B-frame placement decision algorithm. Use only on first-pass.

@item keyint_min (@emph{min-keyint})
Minimum GOP size.

@item coder
Set entropy encoder. Possible values:
@@ -1651,7 +1636,6 @@ Ignore chroma in motion estimation. It generates the same effect as
@end table

@item threads (@emph{threads})
Number of encoding threads.

@item thread_type
Set multithreading technique. Possible values:
@@ -2045,30 +2029,6 @@ fastest.

@end table

@section mpeg2

MPEG-2 video encoder.

@subsection Options

@table @option
@item seq_disp_ext @var{integer}
Specifies if the encoder should write a sequence_display_extension to the
output.
@table @option
@item -1
@itemx auto
Decide automatically to write it or not (this is the default) by checking if
the data to be written is different from the default or unspecified values.
@item 0
@itemx never
Never write it.
@item 1
@itemx always
Always write it.
@end table
@end table

@section png

PNG image encoder.
@@ -2150,27 +2110,3 @@ For the fastest encoding speed set the @option{qscale} parameter (4 is the
recommended value) and do not set a size constraint.

@c man end VIDEO ENCODERS

@chapter Subtitles Encoders
@c man begin SUBTITLES ENCODERS

@section dvdsub

This codec encodes the bitmap subtitle format that is used in DVDs.
Typically they are stored in VOBSUB file pairs (*.idx + *.sub),
and they can also be used in Matroska files.

@subsection Options

@table @option
@item even_rows_fix
When set to 1, enable a work-around that makes the number of pixel rows
even in all subtitles.  This fixes a problem with some players that
cut off the bottom row if the number is odd.  The work-around just adds
a fully transparent row if needed.  The overhead is low, typically
one byte per subtitle on average.

By default, this work-around is disabled.
@end table

@c man end SUBTITLES ENCODERS

@@ -12,9 +12,8 @@ CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)

EXAMPLES=       avio_reading                       \
                decoding_encoding                  \
                avcodec                            \
                demuxing_decoding                  \
                extract_mvs                        \
                filtering_video                    \
                filtering_audio                    \
                metadata                           \
@@ -23,13 +22,11 @@ EXAMPLES=       avio_reading                       \
                resampling_audio                   \
                scaling_video                      \
                transcode_aac                      \
                transcoding                        \

OBJS=$(addsuffix .o,$(EXAMPLES))

# the following examples make explicit use of the math library
avcodec:           LDLIBS += -lm
decoding_encoding: LDLIBS += -lm
muxing:            LDLIBS += -lm
resampling_audio:  LDLIBS += -lm

@@ -24,7 +24,7 @@
 * @file
 * libavcodec API use example.
 *
 * @example decoding_encoding.c
 * @example avcodec.c
 * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
 * not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
 * format handling
@@ -375,13 +375,7 @@ static void video_encode_example(const char *filename, int codec_id)
    c->height = 288;
    /* frames per second */
    c->time_base = (AVRational){1,25};
    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

@@ -640,7 +634,7 @@ int main(int argc, char **argv)
               "This program generates a synthetic stream and encodes it to a file\n"
               "named test.h264, test.mp2 or test.mpg depending on output_type.\n"
               "The encoded stream is then decoded and written to a raw data output.\n"
               "output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
               "output_type must be choosen between 'h264', 'mp2', 'mpg'.\n",
               argv[0]);
        return 1;
    }
@@ -279,7 +279,7 @@ int main (int argc, char **argv)
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

@@ -1,185 +0,0 @@
/*
 * Copyright (c) 2012 Stefano Sabatini
 * Copyright (c) 2014 Clément Bœsch
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <libavutil/motion_vector.h>
#include <libavformat/avformat.h>

static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL;
static AVStream *video_stream = NULL;
static const char *src_filename = NULL;

static int video_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;

static int decode_packet(int *got_frame, int cached)
{
    int decoded = pkt.size;

    *got_frame = 0;

    if (pkt.stream_index == video_stream_idx) {
        int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
            return ret;
        }

        if (*got_frame) {
            int i;
            AVFrameSideData *sd;

            video_frame_count++;
            sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
            if (sd) {
                const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
                for (i = 0; i < sd->size / sizeof(*mvs); i++) {
                    const AVMotionVector *mv = &mvs[i];
                    printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
                           video_frame_count, mv->source,
                           mv->w, mv->h, mv->src_x, mv->src_y,
                           mv->dst_x, mv->dst_y, mv->flags);
                }
            }
        }
    }

    return decoded;
}

static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }

        /* Init the video decoder */
        av_dict_set(&opts, "flags2", "+export_mvs", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
    }

    return 0;
}

int main(int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s <video>\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
			
		||||
 | 
			
		||||
    av_register_all();
 | 
			
		||||
 | 
			
		||||
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
 | 
			
		||||
        fprintf(stderr, "Could not open source file %s\n", src_filename);
 | 
			
		||||
        exit(1);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
 | 
			
		||||
        fprintf(stderr, "Could not find stream information\n");
 | 
			
		||||
        exit(1);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
 | 
			
		||||
        video_stream = fmt_ctx->streams[video_stream_idx];
 | 
			
		||||
        video_dec_ctx = video_stream->codec;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    av_dump_format(fmt_ctx, 0, src_filename, 0);
 | 
			
		||||
 | 
			
		||||
    if (!video_stream) {
 | 
			
		||||
        fprintf(stderr, "Could not find video stream in the input, aborting\n");
 | 
			
		||||
        ret = 1;
 | 
			
		||||
        goto end;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    frame = av_frame_alloc();
 | 
			
		||||
    if (!frame) {
 | 
			
		||||
        fprintf(stderr, "Could not allocate frame\n");
 | 
			
		||||
        ret = AVERROR(ENOMEM);
 | 
			
		||||
        goto end;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
 | 
			
		||||
 | 
			
		||||
    /* initialize packet, set data to NULL, let the demuxer fill it */
 | 
			
		||||
    av_init_packet(&pkt);
 | 
			
		||||
    pkt.data = NULL;
 | 
			
		||||
    pkt.size = 0;
 | 
			
		||||
 | 
			
		||||
    /* read frames from the file */
 | 
			
		||||
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
 | 
			
		||||
        AVPacket orig_pkt = pkt;
 | 
			
		||||
        do {
 | 
			
		||||
            ret = decode_packet(&got_frame, 0);
 | 
			
		||||
            if (ret < 0)
 | 
			
		||||
                break;
 | 
			
		||||
            pkt.data += ret;
 | 
			
		||||
            pkt.size -= ret;
 | 
			
		||||
        } while (pkt.size > 0);
 | 
			
		||||
        av_free_packet(&orig_pkt);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /* flush cached frames */
 | 
			
		||||
    pkt.data = NULL;
 | 
			
		||||
    pkt.size = 0;
 | 
			
		||||
    do {
 | 
			
		||||
        decode_packet(&got_frame, 1);
 | 
			
		||||
    } while (got_frame);
 | 
			
		||||
 | 
			
		||||
end:
 | 
			
		||||
    avcodec_close(video_dec_ctx);
 | 
			
		||||
    avformat_close_input(&fmt_ctx);
 | 
			
		||||
    av_frame_free(&frame);
 | 
			
		||||
    return ret < 0;
 | 
			
		||||
}
 | 
			
		||||
@@ -45,7 +45,6 @@

#include "libavutil/channel_layout.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

@@ -34,8 +34,6 @@
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
@@ -43,28 +41,13 @@
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

static int audio_is_eof, video_is_eof;

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;
static int sws_flags = SWS_BICUBIC;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
@@ -80,7 +63,9 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* Write the compressed frame to the media file. */
@@ -89,12 +74,11 @@ static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AV
}

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;
    AVStream *st;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
@@ -104,13 +88,13 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
        exit(1);
    }

    ost->st = avformat_new_stream(oc, *codec);
    if (!ost->st) {
    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = ost->st->codec;
    st->id = oc->nb_streams-1;
    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
@@ -118,24 +102,7 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        c->channels    = 2;
        break;

    case AVMEDIA_TYPE_VIDEO:
@@ -149,9 +116,8 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;

        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
@@ -173,262 +139,258 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

/**************************************************************/
/* audio output */

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
static float t, tincr, tincr2;

AVFrame *audio_frame;
static uint8_t **src_samples_data;
static int       src_samples_linesize;
static int       src_nb_samples;

static int max_dst_nb_samples;
uint8_t **dst_samples_data;
int       dst_samples_linesize;
int       dst_samples_size;
int samples_count;

struct SwrContext *swr_ctx = NULL;

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVFrame *frame = av_frame_alloc();
    AVCodecContext *c;
    int ret;

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
    c = st->codec;

    /* allocate and init a re-usable frame */
    audio_frame = av_frame_alloc();
    if (!audio_frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->format = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;

    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->st->codec;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    t     = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;
    src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
        10000 : c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
                                             src_nb_samples, AV_SAMPLE_FMT_S16, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        exit(1);
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = src_nb_samples;

    /* create resampler context */
        ost->swr_ctx = swr_alloc();
        if (!ost->swr_ctx) {
    if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
        swr_ctx = swr_alloc();
        if (!swr_ctx) {
            fprintf(stderr, "Could not allocate resampler context\n");
            exit(1);
        }

        /* set options */
        av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
        av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
        av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
        av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
        av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);
        av_opt_set_int       (swr_ctx, "in_channel_count",   c->channels,       0);
        av_opt_set_int       (swr_ctx, "in_sample_rate",     c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int       (swr_ctx, "out_channel_count",  c->channels,       0);
        av_opt_set_int       (swr_ctx, "out_sample_rate",    c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

        /* initialize the resampling context */
        if ((ret = swr_init(ost->swr_ctx)) < 0) {
        if ((ret = swr_init(swr_ctx)) < 0) {
            fprintf(stderr, "Failed to initialize the resampling context\n");
            exit(1);
        }

        ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
                                                 max_dst_nb_samples, c->sample_fmt, 0);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate destination samples\n");
            exit(1);
        }
    } else {
        dst_samples_data = src_samples_data;
    }
    dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
                                                  c->sample_fmt, 0);
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];
    int16_t *q;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    for (j = 0; j <frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->st->codec->channels; i++)
    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
        t     += tincr;
        tincr += tincr2;
    }

    frame->pts = ost->next_pts;
    ost->next_pts  += frame->nb_samples;

    return frame;
}

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
static void write_audio_frame(AVFormatContext *oc, AVStream *st, int flush)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;
    int got_packet, ret, dst_nb_samples;

    av_init_packet(&pkt);
    c = ost->st->codec;
    c = st->codec;

    frame = get_audio_frame(ost);
    if (!flush) {
        get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        if (swr_ctx) {
            /* compute destination number of samples */
            dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
            dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                            c->sample_rate, c->sample_rate, AV_ROUND_UP);
            av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);
            if (dst_nb_samples > max_dst_nb_samples) {
                av_free(dst_samples_data[0]);
                ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
                                       dst_nb_samples, c->sample_fmt, 0);
                if (ret < 0)
                    exit(1);
                max_dst_nb_samples = dst_nb_samples;
                dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                              c->sample_fmt, 0);
            }

            /* convert to destination format */
            ret = swr_convert(ost->swr_ctx,
                              ost->frame->data, dst_nb_samples,
                              (const uint8_t **)frame->data, frame->nb_samples);
            ret = swr_convert(swr_ctx,
                              dst_samples_data, dst_nb_samples,
                              (const uint8_t **)src_samples_data, src_nb_samples);
            if (ret < 0) {
                fprintf(stderr, "Error while converting\n");
                exit(1);
            }
            frame = ost->frame;
        } else {
            dst_nb_samples = src_nb_samples;
        }

        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
        audio_frame->nb_samples = dst_nb_samples;
        audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
                                 dst_samples_data[0], dst_samples_size, 0);
        samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    ret = avcodec_encode_audio2(c, &pkt, flush ? NULL : audio_frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    if (!got_packet) {
        if (flush)
            audio_is_eof = 1;
        return;
    }

    return (frame || got_packet) ? 0 : 1;
    ret = write_frame(oc, &c->time_base, st, &pkt);
    if (ret < 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    if (dst_samples_data != src_samples_data) {
        av_free(dst_samples_data[0]);
        av_free(dst_samples_data);
    }
    av_free(src_samples_data[0]);
    av_free(src_samples_data);
    av_frame_free(&audio_frame);
}

/**************************************************************/
/* video output */

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = ost->st->codec;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);
    AVCodecContext *c = st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate temporary picture: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
static void fill_yuv_image(AVPicture *pict, int frame_index,
                           int width, int height)
{
    int x, y, i, ret;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(pict);
    if (ret < 0)
        exit(1);
    int x, y, i;

    i = frame_index;

@@ -446,89 +408,65 @@ static void fill_yuv_image(AVFrame *pict, int frame_index,
    }
}

static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->st->codec;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx,
                  (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
                  0, c->height, ost->frame->data, ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    c = ost->st->codec;
    if (!flush) {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    frame = get_video_frame(ost);

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* a hack to avoid data copy with some raw video muxers */
    if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        if (!frame)
            return 1;

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = ost->st->index;
        pkt.data          = (uint8_t *)frame;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        pkt.pts = pkt.dts = frame->pts;
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        frame->pts = frame_count;
        ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */

        if (got_packet) {
            ret = write_frame(oc, &c->time_base, ost->st, &pkt);
            ret = write_frame(oc, &c->time_base, st, &pkt);
        } else {
            if (flush)
                video_is_eof = 1;
            ret = 0;
        }
    }
@@ -537,17 +475,15 @@ static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
    frame_count++;
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(ost->st->codec);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_frame_free(&frame);
}

/**************************************************************/
@@ -555,20 +491,18 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    double audio_time, video_time;
    int flush, ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
@@ -580,9 +514,6 @@ int main(int argc, char **argv)
    }

    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
@@ -597,24 +528,20 @@ int main(int argc, char **argv)

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE)
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

@@ -629,21 +556,30 @@ int main(int argc, char **argv)
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
                                            audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
    flush = 0;
    while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
        /* Compute current audio and video time. */
        audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
        video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush &&
            (!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames */
        if (audio_st && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, audio_st, flush);
        } else if (video_st && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, video_st, flush);
        }
    }

@@ -654,10 +590,10 @@ int main(int argc, char **argv)
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */

@@ -99,7 +99,6 @@ int main(int argc, char **argv)
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

@@ -168,7 +168,7 @@ int main(int argc, char **argv)
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
                                        src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_freep(&dst_data[0]);
            av_free(dst_data[0]);
            ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                                   dst_nb_samples, dst_sample_fmt, 1);
            if (ret < 0)
@@ -199,7 +199,8 @@ int main(int argc, char **argv)
            fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
    fclose(dst_file);
    if (dst_file)
        fclose(dst_file);

    if (src_data)
        av_freep(&src_data[0]);

@@ -132,7 +132,8 @@ int main(int argc, char **argv)
           av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);

end:
    fclose(dst_file);
    if (dst_file)
        fclose(dst_file);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    sws_freeContext(sws_ctx);

@@ -52,7 +52,7 @@
 * @param error Error code to be converted
 * @return Corresponding error text (not thread-safe)
 */
static const char *get_error_text(const int error)
static char *const get_error_text(const int error)
{
    static char error_buffer[255];
    av_strerror(error, error_buffer, sizeof(error_buffer));

@@ -1,601 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
 * Copyright (c) 2010 Nicolas George
 | 
			
		||||
 * Copyright (c) 2011 Stefano Sabatini
 | 
			
		||||
 * Copyright (c) 2014 Andrey Utkin
 | 
			
		||||
 *
 | 
			
		||||
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 | 
			
		||||
 * of this software and associated documentation files (the "Software"), to deal
 | 
			
		||||
 * in the Software without restriction, including without limitation the rights
 | 
			
		||||
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 | 
			
		||||
 * copies of the Software, and to permit persons to whom the Software is
 | 
			
		||||
 * furnished to do so, subject to the following conditions:
 | 
			
		||||
 *
 | 
			
		||||
 * The above copyright notice and this permission notice shall be included in
 | 
			
		||||
 * all copies or substantial portions of the Software.
 | 
			
		||||
 *
 | 
			
		||||
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 | 
			
		||||
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 | 
			
		||||
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 | 
			
		||||
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 | 
			
		||||
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 | 
			
		||||
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 | 
			
		||||
 * THE SOFTWARE.
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * @file
 | 
			
		||||
 * API example for demuxing, decoding, filtering, encoding and muxing
 | 
			
		||||
 * @example transcoding.c
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
#include <libavcodec/avcodec.h>
 | 
			
		||||
#include <libavformat/avformat.h>
 | 
			
		||||
#include <libavfilter/avfiltergraph.h>
 | 
			
		||||
#include <libavfilter/avcodec.h>
 | 
			
		||||
#include <libavfilter/buffersink.h>
 | 
			
		||||
#include <libavfilter/buffersrc.h>
 | 
			
		||||
#include <libavutil/opt.h>
 | 
			
		||||
#include <libavutil/pixdesc.h>
 | 
			
		||||
 | 
			
		||||
static AVFormatContext *ifmt_ctx;
 | 
			
		||||
static AVFormatContext *ofmt_ctx;
 | 
			
		||||
typedef struct FilteringContext {
 | 
			
		||||
    AVFilterContext *buffersink_ctx;
 | 
			
		||||
    AVFilterContext *buffersrc_ctx;
 | 
			
		||||
    AVFilterGraph *filter_graph;
 | 
			
		||||
} FilteringContext;
 | 
			
		||||
static FilteringContext *filter_ctx;
 | 
			
		||||
 | 
			
		||||
static int open_input_file(const char *filename)
 | 
			
		||||
{
 | 
			
		||||
    int ret;
 | 
			
		||||
    unsigned int i;
 | 
			
		||||
 | 
			
		||||
    ifmt_ctx = NULL;
 | 
			
		||||
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
 | 
			
		||||
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
 | 
			
		||||
        return ret;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
 | 
			
		||||
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
 | 
			
		||||
        return ret;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 | 
			
		||||
        AVStream *stream;
 | 
			
		||||
        AVCodecContext *codec_ctx;
 | 
			
		||||
        stream = ifmt_ctx->streams[i];
 | 
			
		||||
        codec_ctx = stream->codec;
 | 
			
		||||
        /* Reencode video & audio and remux subtitles etc. */
 | 
			
		||||
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
 | 
			
		||||
                || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 | 
			
		||||
            /* Open decoder */
 | 
			
		||||
            ret = avcodec_open2(codec_ctx,
 | 
			
		||||
                    avcodec_find_decoder(codec_ctx->codec_id), NULL);
 | 
			
		||||
            if (ret < 0) {
 | 
			
		||||
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
 | 
			
		||||
                return ret;
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    av_dump_format(ifmt_ctx, 0, filename, 0);
 | 
			
		||||
    return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }


    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = in_stream->codec;
        enc_ctx = out_stream->codec;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                enc_ctx->pix_fmt = encoder->pix_fmts[0];
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = dec_ctx->time_base;
            } else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
            }

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        } else {
            /* if this stream must be remuxed */
            ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
                    ifmt_ctx->streams[i]->codec);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
                return ret;
            }
        }

        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    }
    av_dump_format(ofmt_ctx, 0, filename, 1);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}

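/* Aside, not part of the original example: open_output_file() above reuses the
 * input codec for each output stream.  A sketch of forcing a specific encoder
 * instead (assuming that codec is enabled in the FFmpeg build) would replace
 * the avcodec_find_encoder(dec_ctx->codec_id) call, e.g.:
 *
 *     encoder = avcodec_find_encoder(AV_CODEC_ID_H264);   // for video streams
 *     // encoder = avcodec_find_encoder(AV_CODEC_ID_AAC); // for audio streams
 *
 * The width/height, pix_fmt, sample_fmt and time_base assignments above must
 * then still describe something the chosen encoder supports. */
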
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                (uint8_t*)&enc_ctx->channel_layout,
                sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                    &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

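/* Aside, not part of the original example: for a hypothetical 1280x720
 * yuv420p stream with a 1/25 time base and square pixels, the args string
 * built by init_filter() for the "buffer" source expands to something like:
 *
 *     "video_size=1280x720:pix_fmt=0:time_base=1/25:pixel_aspect=1/1"
 *
 * (pix_fmt is printed as the numeric AVPixelFormat value; 0 corresponds to
 * AV_PIX_FMT_YUV420P in typical builds). */
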
static int init_filters(void)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx  = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph   = NULL;
        if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;


        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
                ofmt_ctx->streams[i]->codec, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}

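/* Aside, not part of the original example: "null" and "anull" simply pass
 * frames through.  A sketch of plugging in a real filter chain instead,
 * assuming open_output_file() is adjusted so the encoder parameters match the
 * filter output, could be:
 *
 *     if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
 *         filter_spec = "scale=w=iw/2:h=ih/2";   // halve the picture size
 *     else
 *         filter_spec = "aresample=44100";       // resample audio to 44.1 kHz
 *
 * With the scale filter, enc_ctx->width/height would have to be set to the
 * scaled dimensions, otherwise the encoder and the buffersink disagree. */
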
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codec->codec_type ==
         AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
            filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
            ofmt_ctx->streams[stream_index]->codec->time_base,
            ofmt_ctx->streams[stream_index]->time_base,
            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
            ofmt_ctx->streams[stream_index]->codec->time_base,
            ofmt_ctx->streams[stream_index]->time_base,
            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    enc_pkt.duration = av_rescale_q(enc_pkt.duration,
            ofmt_ctx->streams[stream_index]->codec->time_base,
            ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}

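/* Aside, not part of the original example: the three av_rescale_q*() calls in
 * encode_write_frame() convert the packet timestamps from the encoder time
 * base to the output stream time base.  On FFmpeg versions that provide
 * av_packet_rescale_ts(), the same conversion can be written as one call
 * (a sketch, not taken from the original code):
 *
 *     av_packet_rescale_ts(&enc_pkt,
 *             ofmt_ctx->streams[stream_index]->codec->time_base,
 *             ofmt_ctx->streams[stream_index]->time_base);
 */
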
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    int ret;
    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
            frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
                filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }

        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }

    return ret;
}

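/* Aside, not part of the original example: passing a NULL frame to
 * av_buffersrc_add_frame_flags() signals end-of-stream to the filtergraph,
 * after which av_buffersink_get_frame() drains any buffered frames and
 * finally returns AVERROR_EOF.  That is why main() below flushes each stream
 * with:
 *
 *     filter_encode_write_frame(NULL, i);
 */
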
static int flush_encoder(unsigned int stream_index)
{
    int ret;
    int got_frame;

    if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
                CODEC_CAP_DELAY))
        return 0;

    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}

int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            packet.dts = av_rescale_q_rnd(packet.dts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ifmt_ctx->streams[stream_index]->codec->time_base,
                    AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            packet.pts = av_rescale_q_rnd(packet.pts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ifmt_ctx->streams[stream_index]->codec->time_base,
                    AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            packet.dts = av_rescale_q_rnd(packet.dts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ofmt_ctx->streams[stream_index]->time_base,
                    AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            packet.pts = av_rescale_q_rnd(packet.pts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ofmt_ctx->streams[stream_index]->time_base,
                    AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_free_packet(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_free_packet(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}
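
/* Aside, not part of the original example: a hypothetical build and run line
 * for this program, assuming pkg-config can locate the FFmpeg libraries
 * (exact flags and library availability differ per system):
 *
 *     cc -o transcoding transcoding.c \
 *         $(pkg-config --cflags --libs libavfilter libavformat libavcodec libavutil)
 *     ./transcoding input.mp4 output.mp4
 */
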
@@ -298,7 +298,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
 | 
			
		||||
@code{concat}} protocol designed specifically for that, with examples in the
 | 
			
		||||
documentation.
 | 
			
		||||
 | 
			
		||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
 | 
			
		||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
 | 
			
		||||
video by merely concatenating the files containing them.
 | 
			
		||||
 | 
			
		||||
Hence you may concatenate your multimedia files by first transcoding them to
 | 
			
		||||
@@ -392,7 +392,7 @@ VOB and a few other formats do not have a global header that describes
 | 
			
		||||
everything present in the file. Instead, applications are supposed to scan
 | 
			
		||||
the file to see what it contains. Since VOB files are frequently large, only
 | 
			
		||||
the beginning is scanned. If the subtitles happen only later in the file,
 | 
			
		||||
they will not be initially detected.
 | 
			
		||||
they will not be initally detected.
 | 
			
		||||
 | 
			
		||||
Some applications, including the @code{ffmpeg} command-line tool, can only
 | 
			
		||||
work with streams that were detected during the initial scan; streams that
 | 
			
		||||
 
 | 
			
		||||
@@ -90,8 +90,7 @@ the following diagram:
 | 
			
		||||
                                      |         |
 | 
			
		||||
                                      | decoded |
 | 
			
		||||
                                      | frames  |
 | 
			
		||||
                                      |_________|
 | 
			
		||||
 ________             ______________       |
 | 
			
		||||
 ________             ______________  |_________|
 | 
			
		||||
|        |           |              |      |
 | 
			
		||||
| output | <-------- | encoded data | <----+
 | 
			
		||||
| file   |   muxer   | packets      |   encoder
 | 
			
		||||
@@ -124,16 +123,11 @@ the same type. In the above diagram they can be represented by simply inserting
 | 
			
		||||
an additional step between decoding and encoding:
 | 
			
		||||
 | 
			
		||||
@example
 | 
			
		||||
 _________                        ______________
 | 
			
		||||
|         |                      |              |
 | 
			
		||||
| decoded |                      | encoded data |
 | 
			
		||||
| frames  |\                   _ | packets      |
 | 
			
		||||
|_________| \                  /||______________|
 | 
			
		||||
             \   __________   /
 | 
			
		||||
  simple     _\||          | /  encoder
 | 
			
		||||
  filtergraph   | filtered |/
 | 
			
		||||
                | frames   |
 | 
			
		||||
                |__________|
 | 
			
		||||
 _________               __________              ______________
 | 
			
		||||
|         |  simple     |          |            |              |
 | 
			
		||||
| decoded |  fltrgrph   | filtered |  encoder   | encoded data |
 | 
			
		||||
| frames  | ----------> | frames   | ---------> | packets      |
 | 
			
		||||
|_________|             |__________|            |______________|
 | 
			
		||||
 | 
			
		||||
@end example
 | 
			
		||||
 | 
			
		||||
@@ -272,13 +266,8 @@ ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
 | 
			
		||||
will copy all the streams except the second video, which will be encoded with
 | 
			
		||||
libx264, and the 138th audio, which will be encoded with libvorbis.
 | 
			
		||||
 | 
			
		||||
@item -t @var{duration} (@emph{input/output})
 | 
			
		||||
When used as an input option (before @code{-i}), limit the @var{duration} of
 | 
			
		||||
data read from the input file.
 | 
			
		||||
 | 
			
		||||
When used as an output option (before an output filename), stop writing the
 | 
			
		||||
output after its duration reaches @var{duration}.
 | 
			
		||||
 | 
			
		||||
@item -t @var{duration} (@emph{output})
 | 
			
		||||
Stop writing the output after its duration reaches @var{duration}.
 | 
			
		||||
@var{duration} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form.
 | 
			
		||||
 | 
			
		||||
-to and -t are mutually exclusive and -t has priority.
 | 
			
		||||
@@ -339,7 +328,7 @@ ffmpeg -i in.avi -metadata title="my title" out.flv
 | 
			
		||||
 | 
			
		||||
To set the language of the first audio stream:
 | 
			
		||||
@example
 | 
			
		||||
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
 | 
			
		||||
ffmpeg -i INPUT -metadata:s:a:1 language=eng OUTPUT
 | 
			
		||||
@end example
 | 
			
		||||
 | 
			
		||||
@item -target @var{type} (@emph{output})
 | 
			
		||||
@@ -473,9 +462,6 @@ Set frame rate (Hz value, fraction or abbreviation).
 | 
			
		||||
 | 
			
		||||
As an input option, ignore any timestamps stored in the file and instead
 | 
			
		||||
generate timestamps assuming constant frame rate @var{fps}.
 | 
			
		||||
This is not the same as the @option{-framerate} option used for some input formats
 | 
			
		||||
like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
 | 
			
		||||
If in doubt use @option{-framerate} instead of the input option @option{-r}.
 | 
			
		||||
 | 
			
		||||
As an output option, duplicate or drop input frames to achieve constant output
 | 
			
		||||
frame rate @var{fps}.
 | 
			
		||||
@@ -537,7 +523,7 @@ filter the stream.
 | 
			
		||||
This is an alias for @code{-filter:v}, see the @ref{filter_option,,-filter option}.
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
@section Advanced Video options
 | 
			
		||||
@section Advanced Video Options
 | 
			
		||||
 | 
			
		||||
@table @option
 | 
			
		||||
@item -pix_fmt[:@var{stream_specifier}] @var{format} (@emph{input/output,per-stream})
 | 
			
		||||
@@ -651,14 +637,8 @@ Do not use any hardware acceleration (the default).
 | 
			
		||||
@item auto
 | 
			
		||||
Automatically select the hardware acceleration method.
 | 
			
		||||
 | 
			
		||||
@item vda
 | 
			
		||||
Use Apple VDA hardware acceleration.
 | 
			
		||||
 | 
			
		||||
@item vdpau
 | 
			
		||||
Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
 | 
			
		||||
 | 
			
		||||
@item dxva2
 | 
			
		||||
Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
This option has no effect if the selected hwaccel is not available or not
 | 
			
		||||
@@ -681,10 +661,6 @@ method chosen.
 | 
			
		||||
@item vdpau
 | 
			
		||||
For VDPAU, this option specifies the X11 display/screen to use. If this option
 | 
			
		||||
is not specified, the value of the @var{DISPLAY} environment variable is used
 | 
			
		||||
 | 
			
		||||
@item dxva2
 | 
			
		||||
For DXVA2, this option should contain the number of the display adapter to use.
 | 
			
		||||
If this option is not specified, the default adapter is used.
 | 
			
		||||
@end table
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
@@ -720,7 +696,7 @@ filter the stream.
 | 
			
		||||
This is an alias for @code{-filter:a}, see the @ref{filter_option,,-filter option}.
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
@section Advanced Audio options
 | 
			
		||||
@section Advanced Audio options:
 | 
			
		||||
 | 
			
		||||
@table @option
 | 
			
		||||
@item -atag @var{fourcc/tag} (@emph{output})
 | 
			
		||||
@@ -735,7 +711,7 @@ stereo but not 6 channels as 5.1. The default is to always try to guess. Use
 | 
			
		||||
0 to disable all guessing.
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
@section Subtitle options
 | 
			
		||||
@section Subtitle options:
 | 
			
		||||
 | 
			
		||||
@table @option
 | 
			
		||||
@item -scodec @var{codec} (@emph{input/output})
 | 
			
		||||
@@ -746,7 +722,7 @@ Disable subtitle recording.
 | 
			
		||||
Deprecated, see -bsf
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
@section Advanced Subtitle options
 | 
			
		||||
@section Advanced Subtitle options:
 | 
			
		||||
 | 
			
		||||
@table @option
 | 
			
		||||
 | 
			
		||||
@@ -824,11 +800,6 @@ To map all the streams except the second audio, use negative mappings
 | 
			
		||||
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
 | 
			
		||||
@end example
 | 
			
		||||
 | 
			
		||||
To pick the English audio stream:
 | 
			
		||||
@example
 | 
			
		||||
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
 | 
			
		||||
@end example
 | 
			
		||||
 | 
			
		||||
Note that using this option disables the default mappings for this output file.
 | 
			
		||||
 | 
			
		||||
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}]
 | 
			
		||||
@@ -1154,30 +1125,6 @@ requested by @command{ffserver}.
 | 
			
		||||
The option is intended for cases where features are needed that cannot be
 | 
			
		||||
specified to @command{ffserver} but can be to @command{ffmpeg}.
 | 
			
		||||
 | 
			
		||||
@item -discard (@emph{input})
 | 
			
		||||
Allows discarding specific streams or frames of streams at the demuxer.
 | 
			
		||||
Not all demuxers support this.
 | 
			
		||||
 | 
			
		||||
@table @option
 | 
			
		||||
@item none
 | 
			
		||||
Discard no frame.
 | 
			
		||||
 | 
			
		||||
@item default
 | 
			
		||||
Default, which discards no frames.
 | 
			
		||||
 | 
			
		||||
@item noref
 | 
			
		||||
Discard all non-reference frames.
 | 
			
		||||
 | 
			
		||||
@item bidir
 | 
			
		||||
Discard all bidirectional frames.
 | 
			
		||||
 | 
			
		||||
@item nokey
 | 
			
		||||
Discard all frames excepts keyframes.
 | 
			
		||||
 | 
			
		||||
@item all
 | 
			
		||||
Discard all frames.
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
As a special exception, you can use a bitmap subtitle stream as input: it
 | 
			
		||||
@@ -1458,11 +1405,11 @@ ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
 | 
			
		||||
You can put many streams of the same type in the output:
 | 
			
		||||
 | 
			
		||||
@example
 | 
			
		||||
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
 | 
			
		||||
ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
 | 
			
		||||
@end example
 | 
			
		||||
 | 
			
		||||
The resulting output file @file{test12.nut} will contain the first four streams
 | 
			
		||||
from the input files in reverse order.
 | 
			
		||||
The resulting output file @file{test12.avi} will contain first four streams from
 | 
			
		||||
the input file in reverse order.
 | 
			
		||||
 | 
			
		||||
@item
 | 
			
		||||
To force CBR video output:
 | 
			
		||||
 
 | 
			
		||||
@@ -84,9 +84,6 @@ output. In the filtergraph, the input is associated to the label
 | 
			
		||||
ffmpeg-filters manual for more information about the filtergraph
 | 
			
		||||
syntax.
 | 
			
		||||
 | 
			
		||||
You can specify this parameter multiple times and cycle through the specified
 | 
			
		||||
filtergraphs along with the show modes by pressing the key @key{w}.
 | 
			
		||||
 | 
			
		||||
@item -af @var{filtergraph}
 | 
			
		||||
@var{filtergraph} is a description of the filtergraph to apply to
 | 
			
		||||
the input audio.
 | 
			
		||||
@@ -162,10 +159,6 @@ Force a specific video decoder.
 | 
			
		||||
 | 
			
		||||
@item -scodec @var{codec_name}
 | 
			
		||||
Force a specific subtitle decoder.
 | 
			
		||||
 | 
			
		||||
@item -autorotate
 | 
			
		||||
Automatically rotate the video according to presentation metadata. Set by
 | 
			
		||||
default, use -noautorotate to disable.
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
@section While playing
 | 
			
		||||
@@ -181,7 +174,7 @@ Toggle full screen.
 | 
			
		||||
Pause.
 | 
			
		||||
 | 
			
		||||
@item a
 | 
			
		||||
Cycle audio channel in the current program.
 | 
			
		||||
Cycle audio channel in the curret program.
 | 
			
		||||
 | 
			
		||||
@item v
 | 
			
		||||
Cycle video channel.
 | 
			
		||||
@@ -193,7 +186,7 @@ Cycle subtitle channel in the current program.
 | 
			
		||||
Cycle program.
 | 
			
		||||
 | 
			
		||||
@item w
 | 
			
		||||
Cycle video filters or show modes.
 | 
			
		||||
Show audio waves.
 | 
			
		||||
 | 
			
		||||
@item s
 | 
			
		||||
Step to the next frame.
 | 
			
		||||
 
 | 
			
		||||
@@ -119,10 +119,6 @@ Show payload data, as a hexadecimal and ASCII dump. Coupled with
 | 
			
		||||
 | 
			
		||||
The dump is printed as the "data" field. It may contain newlines.
 | 
			
		||||
 | 
			
		||||
@item -show_data_hash @var{algorithm}
 | 
			
		||||
Show a hash of payload data, for packets with @option{-show_packets} and for
 | 
			
		||||
codec extradata with @option{-show_streams}.
 | 
			
		||||
 | 
			
		||||
@item -show_error
 | 
			
		||||
Show information about the error found when trying to probe the input.
 | 
			
		||||
 | 
			
		||||
@@ -184,7 +180,7 @@ format : stream=codec_type
 | 
			
		||||
 | 
			
		||||
To show all the tags in the stream and format sections:
 | 
			
		||||
@example
 | 
			
		||||
stream_tags : format_tags
 | 
			
		||||
format_tags : format_tags
 | 
			
		||||
@end example
 | 
			
		||||
 | 
			
		||||
To show only the @code{title} tag (if available) in the stream
 | 
			
		||||
 
 | 
			
		||||
@@ -8,15 +8,15 @@
 | 
			
		||||
 | 
			
		||||
    <xsd:complexType name="ffprobeType">
 | 
			
		||||
        <xsd:sequence>
 | 
			
		||||
            <xsd:element name="program_version"  type="ffprobe:programVersionType"  minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="packets"  type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="frames"   type="ffprobe:framesType"  minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="streams"  type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="format"   type="ffprobe:formatType"  minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="error"    type="ffprobe:errorType"   minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="program_version"  type="ffprobe:programVersionType"  minOccurs="0" maxOccurs="1" />
 | 
			
		||||
            <xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
 | 
			
		||||
        </xsd:sequence>
 | 
			
		||||
    </xsd:complexType>
 | 
			
		||||
 | 
			
		||||
@@ -50,15 +50,9 @@
 | 
			
		||||
      <xsd:attribute name="pos"           type="xsd:long"  />
 | 
			
		||||
      <xsd:attribute name="flags"         type="xsd:string" use="required" />
 | 
			
		||||
      <xsd:attribute name="data"          type="xsd:string" />
 | 
			
		||||
      <xsd:attribute name="data_hash"     type="xsd:string" />
 | 
			
		||||
    </xsd:complexType>
 | 
			
		||||
 | 
			
		||||
    <xsd:complexType name="frameType">
 | 
			
		||||
      <xsd:sequence>
 | 
			
		||||
            <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
 | 
			
		||||
            <xsd:element name="side_data_list" type="ffprobe:frameSideDataListType"   minOccurs="0" maxOccurs="1" />
 | 
			
		||||
      </xsd:sequence>
 | 
			
		||||
 | 
			
		||||
      <xsd:attribute name="media_type"    type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="key_frame"     type="xsd:int"    use="required"/>
 | 
			
		||||
      <xsd:attribute name="pts"           type="xsd:long" />
 | 
			
		||||
@@ -93,16 +87,6 @@
 | 
			
		||||
      <xsd:attribute name="repeat_pict"            type="xsd:int"   />
 | 
			
		||||
    </xsd:complexType>
 | 
			
		||||
 | 
			
		||||
    <xsd:complexType name="frameSideDataListType">
 | 
			
		||||
        <xsd:sequence>
 | 
			
		||||
            <xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
 | 
			
		||||
        </xsd:sequence>
 | 
			
		||||
    </xsd:complexType>
 | 
			
		||||
    <xsd:complexType name="frameSideDataType">
 | 
			
		||||
        <xsd:attribute name="side_data_type"              type="xsd:string"/>
 | 
			
		||||
        <xsd:attribute name="side_data_size"              type="xsd:int"   />
 | 
			
		||||
    </xsd:complexType>
 | 
			
		||||
 | 
			
		||||
    <xsd:complexType name="subtitleType">
 | 
			
		||||
      <xsd:attribute name="media_type"         type="xsd:string" fixed="subtitle" use="required"/>
 | 
			
		||||
      <xsd:attribute name="pts"                type="xsd:long" />
 | 
			
		||||
@@ -154,7 +138,6 @@
 | 
			
		||||
      <xsd:attribute name="codec_tag"        type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="extradata"        type="xsd:string" />
 | 
			
		||||
      <xsd:attribute name="extradata_hash"   type="xsd:string" />
 | 
			
		||||
 | 
			
		||||
      <!-- video attributes -->
 | 
			
		||||
      <xsd:attribute name="width"                type="xsd:int"/>
 | 
			
		||||
@@ -164,8 +147,6 @@
 | 
			
		||||
      <xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
 | 
			
		||||
      <xsd:attribute name="pix_fmt"              type="xsd:string"/>
 | 
			
		||||
      <xsd:attribute name="level"                type="xsd:int"/>
 | 
			
		||||
      <xsd:attribute name="color_range"          type="xsd:string"/>
 | 
			
		||||
      <xsd:attribute name="color_space"          type="xsd:string"/>
 | 
			
		||||
      <xsd:attribute name="timecode"             type="xsd:string"/>
 | 
			
		||||
 | 
			
		||||
      <!-- audio attributes -->
 | 
			
		||||
@@ -184,8 +165,6 @@
 | 
			
		||||
      <xsd:attribute name="duration_ts"      type="xsd:long"/>
 | 
			
		||||
      <xsd:attribute name="duration"         type="xsd:float"/>
 | 
			
		||||
      <xsd:attribute name="bit_rate"         type="xsd:int"/>
 | 
			
		||||
      <xsd:attribute name="max_bit_rate"     type="xsd:int"/>
 | 
			
		||||
      <xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
 | 
			
		||||
      <xsd:attribute name="nb_frames"        type="xsd:int"/>
 | 
			
		||||
      <xsd:attribute name="nb_read_frames"   type="xsd:int"/>
 | 
			
		||||
      <xsd:attribute name="nb_read_packets"  type="xsd:int"/>
 | 
			
		||||
@@ -240,7 +219,8 @@
 | 
			
		||||
      <xsd:attribute name="copyright"        type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="build_date"       type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="build_time"       type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="compiler_ident"   type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="compiler_type"    type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="compiler_version" type="xsd:string" use="required"/>
 | 
			
		||||
      <xsd:attribute name="configuration"    type="xsd:string" use="required"/>
 | 
			
		||||
    </xsd:complexType>
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -1,11 +1,11 @@
 | 
			
		||||
# Port on which the server is listening. You must select a different
 | 
			
		||||
# port from your standard HTTP web server if it is running on the same
 | 
			
		||||
# computer.
 | 
			
		||||
HTTPPort 8090
 | 
			
		||||
Port 8090
 | 
			
		||||
 | 
			
		||||
# Address on which the server is bound. Only useful if you have
 | 
			
		||||
# several network interfaces.
 | 
			
		||||
HTTPBindAddress 0.0.0.0
 | 
			
		||||
BindAddress 0.0.0.0
 | 
			
		||||
 | 
			
		||||
# Number of simultaneous HTTP connections that can be handled. It has
 | 
			
		||||
# to be defined *before* the MaxClients parameter, since it defines the
 | 
			
		||||
 
 | 
			
		||||
@@ -66,12 +66,12 @@ http://@var{ffserver_ip_address}:@var{http_port}/@var{feed_name}
 | 
			
		||||
 | 
			
		||||
where @var{ffserver_ip_address} is the IP address of the machine where
 | 
			
		||||
@command{ffserver} is installed, @var{http_port} is the port number of
 | 
			
		||||
the HTTP server (configured through the @option{HTTPPort} option), and
 | 
			
		||||
the HTTP server (configured through the @option{Port} option), and
 | 
			
		||||
@var{feed_name} is the name of the corresponding feed defined in the
 | 
			
		||||
configuration file.
 | 
			
		||||
 | 
			
		||||
Each feed is associated to a file which is stored on disk. This stored
 | 
			
		||||
file is used to send pre-recorded data to a player as fast as
 | 
			
		||||
file is used to allow to send pre-recorded data to a player as fast as
 | 
			
		||||
possible when new content is added in real-time to the stream.
 | 
			
		||||
 | 
			
		||||
A "live-stream" or "stream" is a resource published by
 | 
			
		||||
@@ -101,7 +101,7 @@ http://@var{ffserver_ip_address}:@var{rtsp_port}/@var{stream_name}[@var{options}
 | 
			
		||||
the configuration file. @var{options} is a list of options specified
 | 
			
		||||
after the URL which affects how the stream is served by
 | 
			
		||||
@command{ffserver}. @var{http_port} and @var{rtsp_port} are the HTTP
 | 
			
		||||
and RTSP ports configured with the options @var{HTTPPort} and
 | 
			
		||||
and RTSP ports configured with the options @var{Port} and
 | 
			
		||||
@var{RTSPPort} respectively.
 | 
			
		||||
 | 
			
		||||
In case the stream is associated to a feed, the encoding parameters
 | 
			
		||||
@@ -203,9 +203,11 @@ WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
 | 
			
		||||
transfer the entire file before starting to play.
 | 
			
		||||
The same is true of AVI files.
 | 
			
		||||
 | 
			
		||||
You should edit the @file{ffserver.conf} file to suit your needs (in
 | 
			
		||||
terms of frame rates etc). Then install @command{ffserver} and
 | 
			
		||||
@command{ffmpeg}, write a script to start them up, and off you go.
 | 
			
		||||
@section What happens next?
 | 
			
		||||
 | 
			
		||||
You should edit the ffserver.conf file to suit your needs (in terms of
 | 
			
		||||
frame rates etc). Then install ffserver and ffmpeg, write a script to start
 | 
			
		||||
them up, and off you go.
 | 
			
		||||
 | 
			
		||||
@section What else can it do?
 | 
			
		||||
 | 
			
		||||
@@ -352,29 +354,20 @@ allow everybody else.
 | 
			
		||||
 | 
			
		||||
@section Global options
 | 
			
		||||
@table @option
 | 
			
		||||
@item HTTPPort @var{port_number}
 | 
			
		||||
@item Port @var{port_number}
 | 
			
		||||
@item RTSPPort @var{port_number}
 | 
			
		||||
 | 
			
		||||
@var{HTTPPort} sets the HTTP server listening TCP port number,
 | 
			
		||||
@var{RTSPPort} sets the RTSP server listening TCP port number.
 | 
			
		||||
 | 
			
		||||
@var{Port} is the equivalent of @var{HTTPPort} and is deprecated.
 | 
			
		||||
 | 
			
		||||
You must select a different port from your standard HTTP web server if
 | 
			
		||||
it is running on the same computer.
 | 
			
		||||
Set TCP port number on which the HTTP/RTSP server is listening. You
 | 
			
		||||
must select a different port from your standard HTTP web server if it
 | 
			
		||||
is running on the same computer.
 | 
			
		||||
 | 
			
		||||
If not specified, no corresponding server will be created.
 | 
			
		||||
 | 
			
		||||
@item HTTPBindAddress @var{ip_address}
 | 
			
		||||
@item BindAddress @var{ip_address}
 | 
			
		||||
@item RTSPBindAddress @var{ip_address}
 | 
			
		||||
Set address on which the HTTP/RTSP server is bound. Only useful if you
 | 
			
		||||
have several network interfaces.
 | 
			
		||||
 | 
			
		||||
@var{BindAddress} is the equivalent of @var{HTTPBindAddress} and is
 | 
			
		||||
deprecated.
 | 
			
		||||
 | 
			
		||||
@item MaxHTTPConnections @var{n}
 | 
			
		||||
Set number of simultaneous HTTP connections that can be handled. It
 | 
			
		||||
has to be defined @emph{before} the @option{MaxClients} parameter,
 | 
			
		||||
 
 | 
			
		||||
@@ -3,7 +3,7 @@ representing a number as input, which may be followed by one of the SI
 | 
			
		||||
unit prefixes, for example: 'K', 'M', or 'G'.
 | 
			
		||||
 | 
			
		||||
If 'i' is appended to the SI unit prefix, the complete prefix will be
 | 
			
		||||
interpreted as a unit prefix for binary multiples, which are based on
 | 
			
		||||
interpreted as a unit prefix for binary multiplies, which are based on
 | 
			
		||||
powers of 1024 instead of powers of 1000. Appending 'B' to the SI unit
 | 
			
		||||
prefix multiplies the value by 8. This allows using, for example:
 | 
			
		||||
'KB', 'MiB', 'G' and 'B' as number suffixes.
 | 
			
		||||
@@ -44,15 +44,8 @@ streams of this type.
 | 
			
		||||
If @var{stream_index} is given, then it matches the stream with number @var{stream_index}
 | 
			
		||||
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
 | 
			
		||||
program.
 | 
			
		||||
@item #@var{stream_id} or i:@var{stream_id}
 | 
			
		||||
Match the stream by stream id (e.g. PID in MPEG-TS container).
 | 
			
		||||
@item m:@var{key}[:@var{value}]
 | 
			
		||||
Matches streams with the metadata tag @var{key} having the specified value. If
 | 
			
		||||
@var{value} is not given, matches streams that contain the given tag with any
 | 
			
		||||
value.
 | 
			
		||||
 | 
			
		||||
Note that in @command{ffmpeg}, matching by metadata will only work properly for
 | 
			
		||||
input files.
 | 
			
		||||
@item #@var{stream_id}
 | 
			
		||||
Matches the stream by a format-specific ID.
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
@section Generic options
 | 
			
		||||
@@ -196,8 +189,6 @@ following option is recognized:
 | 
			
		||||
set the file name to use for the report; @code{%p} is expanded to the name
 | 
			
		||||
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
 | 
			
		||||
to a plain @code{%}
 | 
			
		||||
@item level
 | 
			
		||||
set the log level
 | 
			
		||||
@end table
 | 
			
		||||
 | 
			
		||||
Errors in parsing the environment variable are not fatal, and will not
 | 
			
		||||
@@ -234,14 +225,10 @@ Possible flags for this option are:
 | 
			
		||||
@item sse4.1
 | 
			
		||||
@item sse4.2
 | 
			
		||||
@item avx
 | 
			
		||||
@item avx2
 | 
			
		||||
@item xop
 | 
			
		||||
@item fma3
 | 
			
		||||
@item fma4
 | 
			
		||||
@item 3dnow
 | 
			
		||||
@item 3dnowext
 | 
			
		||||
@item bmi1
 | 
			
		||||
@item bmi2
 | 
			
		||||
@item cmov
 | 
			
		||||
@end table
 | 
			
		||||
@item ARM
 | 
			
		||||
@@ -252,13 +239,6 @@ Possible flags for this option are:
 | 
			
		||||
@item vfp
 | 
			
		||||
@item vfpv3
 | 
			
		||||
@item neon
 | 
			
		||||
@item setend
 | 
			
		||||
@end table
 | 
			
		||||
@item AArch64
 | 
			
		||||
@table @samp
 | 
			
		||||
@item armv8
 | 
			
		||||
@item vfp
 | 
			
		||||
@item neon
 | 
			
		||||
@end table
 | 
			
		||||
@item PowerPC
 | 
			
		||||
@table @samp
 | 
			
		||||
 
doc/filters.texi: 1883 lines changed (file diff suppressed because it is too large)

@@ -23,7 +23,7 @@ Reduce buffering.
 | 
			
		||||
 | 
			
		||||
@item probesize @var{integer} (@emph{input})
 | 
			
		||||
Set probing size in bytes, i.e. the size of the data to analyze to get
 | 
			
		||||
stream information. A higher value will enable detecting more
 | 
			
		||||
stream information. A higher value will allow to detect more
 | 
			
		||||
information in case it is dispersed into the stream, but will increase
 | 
			
		||||
latency. Must be an integer not lesser than 32. It is 5000000 by default.
 | 
			
		||||
 | 
			
		||||
@@ -63,7 +63,7 @@ Default is 0.
 | 
			
		||||
 | 
			
		||||
@item analyzeduration @var{integer} (@emph{input})
 | 
			
		||||
Specify how many microseconds are analyzed to probe the input. A
 | 
			
		||||
higher value will enable detecting more accurate information, but will
 | 
			
		||||
higher value will allow to detect more accurate information, but will
 | 
			
		||||
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
 | 
			
		||||
 | 
			
		||||
@item cryptokey @var{hexadecimal string} (@emph{input})
 | 
			
		||||
 
 | 
			
		||||
@@ -130,7 +130,7 @@ Go to @url{http://x265.org/developers.html} and follow the instructions
 | 
			
		||||
for installing the library. Then pass @code{--enable-libx265} to configure
 | 
			
		||||
to enable it.
 | 
			
		||||
 | 
			
		||||
@float NOTE
 | 
			
		||||
@float note
 | 
			
		||||
x265 is under the GNU Public License Version 2 or later
 | 
			
		||||
(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for
 | 
			
		||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
 | 
			
		||||
@@ -205,7 +205,7 @@ library:
 | 
			
		||||
@item American Laser Games MM   @tab   @tab X
 | 
			
		||||
    @tab Multimedia format used in games like Mad Dog McCree.
 | 
			
		||||
@item 3GPP AMR                  @tab X @tab X
 | 
			
		||||
@item Amazing Studio Packed Animation File  @tab   @tab X
 | 
			
		||||
@item Amazing Studio Packed Animation File        @tab   @tab X
 | 
			
		||||
    @tab Multimedia format used in game Heart Of Darkness.
@item Apple HTTP Live Streaming @tab   @tab X
@item Artworx Data Format       @tab   @tab X
@@ -245,7 +245,6 @@ library:
    @tab Multimedia format used by Delphine Software games.
@item CD+G                      @tab   @tab X
    @tab Video format used by CD+G karaoke disks
@item Phantom Cine              @tab   @tab X
@item Commodore CDXL            @tab   @tab X
    @tab Amiga CD video format
@item Core Audio Format         @tab X @tab X
@@ -259,7 +258,6 @@ library:
@item Deluxe Paint Animation    @tab   @tab X
@item DFA                       @tab   @tab X
    @tab This format is used in Chronomaster game
@item DSD Stream File (DSF)     @tab   @tab X
@item DV video                  @tab X @tab X
@item DXA                       @tab   @tab X
    @tab This format is used in the non-Windows version of the Feeble Files
@@ -310,11 +308,9 @@ library:
    @tab Used by Linux Media Labs MPEG-4 PCI boards
@item LOAS                      @tab   @tab X
    @tab contains LATM multiplexed AAC audio
@item LRC                       @tab X @tab X
@item LVF                       @tab   @tab X
@item LXF                       @tab   @tab X
    @tab VR native stream format, used by Leitch/Harris' video servers.
@item Magic Lantern Video (MLV) @tab   @tab X
@item Matroska                  @tab X @tab X
@item Matroska audio            @tab X @tab
@item FFmpeg metadata           @tab X @tab X
@@ -492,13 +488,11 @@ following image formats are supported:
@item Name @tab Encoding @tab Decoding @tab Comments
@item .Y.U.V       @tab X @tab X
    @tab one raw file per component
@item Alias PIX    @tab X @tab X
    @tab Alias/Wavefront PIX image format
@item animated GIF @tab X @tab X
@item BMP          @tab X @tab X
    @tab Microsoft BMP image
@item BRender PIX  @tab   @tab X
    @tab Argonaut BRender 3D engine image format.
@item PIX          @tab   @tab X
    @tab PIX is an image format used in the Argonaut BRender engine.
@item DPX          @tab X @tab X
    @tab Digital Picture Exchange
@item EXR          @tab   @tab X
@@ -686,8 +680,8 @@ following image formats are supported:
@item LCL (LossLess Codec Library) MSZH  @tab     @tab  X
@item LCL (LossLess Codec Library) ZLIB  @tab  E  @tab  E
@item LOCO                   @tab     @tab  X
@item LucasArts SANM/Smush   @tab     @tab  X
    @tab Used in LucasArts games / SMUSH animations.
@item LucasArts Smush        @tab     @tab  X
    @tab Used in LucasArts games.
@item lossless MJPEG         @tab  X  @tab  X
@item Microsoft ATC Screen   @tab     @tab  X
    @tab Also known as Microsoft Screen 3.
@@ -722,8 +716,6 @@ following image formats are supported:
    @tab fourcc: VP50
@item On2 VP6                @tab     @tab  X
    @tab fourcc: VP60,VP61,VP62
@item On2 VP7                @tab     @tab  X
    @tab fourcc: VP70,VP71
@item VP8                    @tab  E  @tab  X
    @tab fourcc: VP80, encoding supported through external library libvpx
@item VP9                    @tab  E  @tab  X
@@ -753,11 +745,11 @@ following image formats are supported:
    @tab Texture dictionaries used by the Renderware Engine.
@item RL2 video              @tab     @tab  X
    @tab used in some games by Entertainment Software Partners
@item SGI RLE 8-bit          @tab     @tab  X
@item Sierra VMD video       @tab     @tab  X
    @tab Used in Sierra VMD files.
@item Silicon Graphics Motion Video Compressor 1 (MVC1)  @tab     @tab  X
@item Silicon Graphics Motion Video Compressor 2 (MVC2)  @tab     @tab  X
@item Silicon Graphics RLE 8-bit video  @tab     @tab  X
@item Smacker video          @tab     @tab  X
    @tab Video encoding used in Smacker.
@item SMPTE VC-1             @tab     @tab  X
@@ -823,7 +815,7 @@ following image formats are supported:
    @tab encoding supported through external library libaacplus
@item AAC                    @tab  E  @tab  X
    @tab encoding supported through external library libfaac and libvo-aacenc
@item AC-3                   @tab IX  @tab  IX
@item AC-3                   @tab IX  @tab  X
@item ADPCM 4X Movie         @tab     @tab  X
@item ADPCM CDROM XA         @tab     @tab  X
@item ADPCM Creative Technology @tab     @tab  X
@@ -867,8 +859,6 @@ following image formats are supported:
@item ADPCM Sound Blaster Pro 2-bit  @tab     @tab  X
@item ADPCM Sound Blaster Pro 2.6-bit  @tab     @tab  X
@item ADPCM Sound Blaster Pro 4-bit  @tab     @tab  X
@item ADPCM VIMA
    @tab Used in LucasArts SMUSH animations.
@item ADPCM Westwood Studios IMA @tab     @tab  X
    @tab Used in Westwood Studios games like Command and Conquer.
@item ADPCM Yamaha           @tab  X  @tab  X
@@ -901,10 +891,6 @@ following image formats are supported:
@item DPCM Sol               @tab     @tab  X
@item DPCM Xan               @tab     @tab  X
    @tab Used in Origin's Wing Commander IV AVI files.
@item DSD (Direct Stream Digital), least significant bit first  @tab  @tab  X
@item DSD (Direct Stream Digital), most significant bit first   @tab  @tab  X
@item DSD (Direct Stream Digital), least significant bit first, planar  @tab  @tab  X
@item DSD (Direct Stream Digital), most significant bit first, planar   @tab  @tab  X
@item DSP Group TrueSpeech   @tab     @tab  X
@item DV audio               @tab     @tab  X
@item Enhanced AC-3          @tab  X  @tab  X
@@ -927,14 +913,13 @@ following image formats are supported:
@item Monkey's Audio         @tab     @tab  X
@item MP1 (MPEG audio layer 1)  @tab     @tab IX
@item MP2 (MPEG audio layer 2)  @tab IX  @tab IX
    @tab encoding supported also through external library TwoLAME
    @tab libtwolame can be used alternatively for encoding.
@item MP3 (MPEG audio layer 3)  @tab  E  @tab IX
    @tab encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
@item MPEG-4 Audio Lossless Coding (ALS)  @tab     @tab  X
@item Musepack SV7           @tab     @tab  X
@item Musepack SV8           @tab     @tab  X
@item Nellymoser Asao        @tab  X  @tab  X
@item On2 AVC (Audio for Video Codec) @tab     @tab  X
@item Opus                   @tab  E  @tab  E
    @tab supported through external library libopus
@item PCM A-law              @tab  X  @tab  X
@@ -1037,7 +1022,7 @@ performance on systems without hardware floating point support).
@item TED Talks captions @tab @tab X @tab   @tab X
@item VobSub (IDX+SUB) @tab   @tab X @tab   @tab X
@item VPlayer          @tab   @tab X @tab   @tab X
@item WebVTT           @tab X @tab X @tab X @tab X
@item WebVTT           @tab X @tab X @tab   @tab X
@item XSUB             @tab   @tab   @tab X @tab X
@end multitable

@@ -1055,7 +1040,6 @@ performance on systems without hardware floating point support).
@item HLS          @tab X
@item HTTP         @tab X
@item HTTPS        @tab X
@item Icecast      @tab X
@item MMSH         @tab X
@item MMST         @tab X
@item pipe         @tab X
@@ -1066,7 +1050,6 @@ performance on systems without hardware floating point support).
@item RTMPTE       @tab X
@item RTMPTS       @tab X
@item RTP          @tab X
@item SAMBA        @tab E
@item SCTP         @tab X
@item SFTP         @tab E
@item TCP          @tab X
@@ -1100,7 +1083,6 @@ performance on systems without hardware floating point support).
@item Video4Linux2      @tab X      @tab X
@item VfW capture       @tab X      @tab
@item X11 grabbing      @tab X      @tab
@item Win32 grabbing    @tab X      @tab
@end multitable

@code{X} means that input/output is supported.

doc/indevs.texi | 168

@@ -1,7 +1,7 @@
@chapter Input Devices
@c man begin INPUT DEVICES

Input devices are configured elements in FFmpeg which enable accessing
Input devices are configured elements in FFmpeg which allow to access
the data coming from a multimedia device attached to your system.

When you configure your FFmpeg build, all the supported input devices
@@ -13,8 +13,8 @@ You can disable all the input devices using the configure option
option "--enable-indev=@var{INDEV}", or you can disable a particular
input device using the option "--disable-indev=@var{INDEV}".

The option "-devices" of the ff* tools will display the list of
supported input devices.
The option "-formats" of the ff* tools will display the list of
supported input devices (amongst the demuxers).

A description of the currently available input devices follows.
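
Both listings can be printed directly from the command line, for example:
@example
ffmpeg -devices
ffmpeg -formats
@end example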

@@ -51,41 +51,6 @@ ffmpeg -f alsa -i hw:0 alsaout.wav
For more information see:
@url{http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html}

@section avfoundation

AVFoundation input device.

AVFoundation is the currently recommended framework by Apple for streamgrabbing on OSX >= 10.7 as well as on iOS.
The older QTKit framework has been marked deprecated since OSX version 10.7.

The filename passed as input is parsed to contain either a device name or index.
The device index can also be given by using -video_device_index.
A given device index will override any given device name.
If the desired device consists of numbers only, use -video_device_index to identify it.
The default device will be chosen if an empty string  or the device name "default" is given.
The available devices can be enumerated by using -list_devices.
The pixel format can be set using -pixel_format.
Available formats:
 monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
 bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
 yuv420p, nv12, yuyv422, gray

@example
ffmpeg -f avfoundation -i "0" out.mpg
@end example

@example
ffmpeg -f avfoundation -video_device_index 0 -i "" out.mpg
@end example

@example
ffmpeg -f avfoundation -pixel_format bgr0 -i "default" out.mpg
@end example

@example
ffmpeg -f avfoundation -list_devices true -i ""
@end example

@section bktr

BSD video input device.
@@ -227,81 +192,6 @@ ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg

See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).

@section gdigrab

Win32 GDI-based screen capture device.

This device allows you to capture a region of the display on Windows.

There are two options for the input filename:
@example
desktop
@end example
or
@example
title=@var{window_title}
@end example

The first option will capture the entire desktop, or a fixed region of the
desktop. The second option will instead capture the contents of a single
window, regardless of its position on the screen.

For example, to grab the entire desktop using @command{ffmpeg}:
@example
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
@end example

Grab a 640x480 region at position @code{10,20}:
@example
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
@end example

Grab the contents of the window named "Calculator"
@example
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
@end example

@subsection Options

@table @option
@item draw_mouse
Specify whether to draw the mouse pointer. Use the value @code{0} to
not draw the pointer. Default value is @code{1}.

@item framerate
Set the grabbing frame rate. Default value is @code{ntsc},
corresponding to a frame rate of @code{30000/1001}.

@item show_region
Show grabbed region on screen.

If @var{show_region} is specified with @code{1}, then the grabbing
region will be indicated on screen. With this option, it is easy to
know what is being grabbed if only a portion of the screen is grabbed.

Note that @var{show_region} is incompatible with grabbing the contents
of a single window.

For example:
@example
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
@end example

@item video_size
Set the video frame size. The default is to capture the full screen if @file{desktop} is selected, or the full window size if @file{title=@var{window_title}} is selected.

@item offset_x
When capturing a region with @var{video_size}, set the distance from the left edge of the screen or desktop.

Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative @var{offset_x} value to move the region to that monitor.

@item offset_y
When capturing a region with @var{video_size}, set the distance from the top edge of the screen or desktop.

Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative @var{offset_y} value to move the region to that monitor.

@end table

@section iec61883

FireWire DV/HDV input device using libiec61883.
@@ -483,28 +373,10 @@ ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"

@end itemize

@section libcdio

Audio-CD input device based on cdio.

To enable this input device during configuration you need libcdio
installed on your system. Requires the configure option
@code{--enable-libcdio}.

This device allows playing and grabbing from an Audio-CD.

For example to copy with @command{ffmpeg} the entire Audio-CD in /dev/sr0,
you may run the command:
@example
ffmpeg -f libcdio -i /dev/sr0 cd.wav
@end example

@section libdc1394

IIDC1394 input device, based on libdc1394 and libraw1394.

Requires the configure option @code{--enable-libdc1394}.

@section openal

The OpenAL input device provides audio capture on all systems with a
@@ -659,33 +531,6 @@ Record a stream from default device:
ffmpeg -f pulse -i default /tmp/pulse.wav
@end example

@section qtkit

QTKit input device.

The filename passed as input is parsed to contain either a device name or index.
The device index can also be given by using -video_device_index.
A given device index will override any given device name.
If the desired device consists of numbers only, use -video_device_index to identify it.
The default device will be chosen if an empty string  or the device name "default" is given.
The available devices can be enumerated by using -list_devices.

@example
ffmpeg -f qtkit -i "0" out.mpg
@end example

@example
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
@end example

@example
ffmpeg -f qtkit -i "default" out.mpg
@end example

@example
ffmpeg -f qtkit -list_devices true -i ""
@end example

@section sndio

sndio input device.
@@ -833,9 +678,6 @@ other filename will be interpreted as device number 0.

X11 video input device.

Depends on X11, Xext, and Xfixes. Requires the configure option
@code{--enable-x11grab}.

This device allows one to capture a region of an X11 display.

The filename passed as input has the syntax:
@@ -916,10 +758,6 @@ ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_siz

@item video_size
Set the video frame size. Default value is @code{vga}.

@item use_shm
Use the MIT-SHM extension for shared memory. Default value is @code{1}.
It may be necessary to disable it for remote displays.
@end table
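
As a rough sketch combining the options above (assuming @option{use_shm} and @option{video_size} can be passed as regular input options; display name and region offsets are placeholders):
@example
ffmpeg -f x11grab -use_shm 0 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
@end example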

@c man end INPUT DEVICES

@@ -22,7 +22,7 @@ a mail for every change to every issue.
(the above does all work already after light testing)

The subscription URL for the ffmpeg-trac list is:
http(s)://lists.ffmpeg.org/mailman/listinfo/ffmpeg-trac
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the webinterface of the tracker is:
http(s)://trac.ffmpeg.org

doc/muxers.texi | 140

@@ -233,10 +233,6 @@ to @var{wrap}.
Start the playlist sequence number from @var{number}. Default value is
0.

@item hls_base_url @var{baseurl}
Append @var{baseurl} to every entry in the playlist.
Useful to generate playlists with absolute paths.
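
A minimal sketch of how @option{hls_base_url} might be combined with the HLS muxer (the base URL and file names here are purely illustrative):
@example
ffmpeg -i in.mkv -codec copy -f hls -hls_base_url http://example.com/live/ out.m3u8
@end example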

Note that the playlist sequence number must be unique for each segment
and it is not to be confused with the segment filename sequence number
which can be cyclic, for example if the @option{wrap} option is
@@ -551,12 +547,6 @@ This operation can take a while, and will not work in various situations such
as fragmented output, thus it is not enabled by default.
@item -movflags rtphint
Add RTP hinting tracks to the output file.
@item -movflags disable_chpl
Disable Nero chapter markers (chpl atom).  Normally, both Nero chapters
and a QuickTime chapter track are written to the file. With this option
set, only the QuickTime chapter track will be written. Nero chapters can
cause failures when the file is reprocessed with certain tagging programs, like
mp3Tag 2.61a and iTunes 11.3, most likely other versions are affected as well.
@end table
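
For instance, Nero chapter markers could be omitted while copying streams with something like the following (a sketch, file names hypothetical):
@example
ffmpeg -i in.mov -codec copy -movflags disable_chpl out.mp4
@end example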

@subsection Example
@@ -643,10 +633,7 @@ Set the first PID for data packets (default 0x0100, max 0x0f00).
@item -mpegts_m2ts_mode @var{number}
Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode.
@item -muxrate @var{number}
Set a constant muxrate (default VBR).
@item -pcr_period @var{number}
Override the default PCR retransmission time (default 20ms), ignored
if variable muxrate is selected.
Set muxrate.
@item -pes_payload_size @var{number}
Set minimum PES packet payload in bytes.
@item -mpegts_flags @var{flags}
@@ -715,30 +702,6 @@ Alternatively you can write the command as:
ffmpeg -benchmark -i INPUT -f null -
@end example

@section nut

@table @option
@item -syncpoints @var{flags}
Change the syncpoint usage in nut:
@table @option
@item @var{default} use the normal low-overhead seeking aids.
@item @var{none} do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
    Use of this option is not recommended, as the resulting files are very damage
    sensitive and seeking is not possible. Also in general the overhead from
    syncpoints is negligible. Note, -@code{write_index} 0 can be used to disable
    all growing data tables, allowing to mux endless streams with limited memory
    and without these disadvantages.
@item @var{timestamped} extend the syncpoint with a wallclock field.
@end table
The @var{none} and @var{timestamped} flags are experimental.
@item -write_index @var{bool}
Write index at the end, the default is to write an index.
@end table

@example
ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor
@end example

@section ogg

Ogg container muxer.
@@ -804,11 +767,6 @@ reference stream. The default value is @code{auto}.
Override the inner container format, by default it is guessed by the filename
extension.

@item segment_format_options @var{options_list}
Set output format options using a :-separated list of key=value
parameters. Values containing the @code{:} special character must be
escaped.

@item segment_list @var{name}
Generate also a listfile named @var{name}. If not specified no
listfile is generated.
@@ -825,21 +783,17 @@ Allow caching (only affects M3U8 list files).
Allow live-friendly file generation.
@end table

@item segment_list_type @var{type}
Select the listing format.
@table @option
@item @var{flat} use a simple flat list of entries.
@item @var{hls} use a m3u8-like structure.
@end table

@item segment_list_size @var{size}
Update the list file so that it contains at most @var{size}
Update the list file so that it contains at most the last @var{size}
segments. If 0 the list file will contain all the segments. Default
value is 0.

@item segment_list_entry_prefix @var{prefix}
Prepend @var{prefix} to each entry. Useful to generate absolute paths.
By default no prefix is applied.
Set @var{prefix} to prepend to the name of each entry filename. By
default no prefix is applied.

@item segment_list_type @var{type}
Specify the format for the segment list file.

The following values are recognized:
@table @samp
@@ -890,16 +844,6 @@ Note that splitting may not be accurate, unless you force the
reference stream key-frames at the given time. See the introductory
notice and the examples below.

@item segment_atclocktime @var{1|0}
If set to "1" split at regular clock time intervals starting from 00:00
o'clock. The @var{time} value specified in @option{segment_time} is
used for setting the length of the splitting interval.

For example with @option{segment_time} set to "900" this makes it possible
to create files at 12:00 o'clock, 12:15, 12:30, etc.

Default value is "0".

@item segment_time_delta @var{delta}
Specify the accuracy time when selecting the start time for a
segment, expressed as a duration specification. Default value is "0".
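
As a hedged sketch of the @option{segment_atclocktime} behaviour described above, cutting 15-minute segments aligned to the wall clock (file names hypothetical):
@example
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_atclocktime 1 -segment_time 900 out%03d.mkv
@end example
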
@@ -956,7 +900,7 @@ argument must be a time duration specification, and defaults to 0.

@itemize
@item
Remux the content of file @file{in.mkv} to a list of segments
To remux the content of file @file{in.mkv} to a list of segments
@file{out-000.nut}, @file{out-001.nut}, etc., and write the list of
generated segments to @file{out.list}:
@example
@@ -964,20 +908,14 @@ ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list out%03d.nu
@end example

@item
Segment input and set output format options for the output segments:
@example
ffmpeg -i in.mkv -f segment -segment_time 10 -segment_format_options movflags=+faststart out%03d.mp4
@end example

@item
Segment the input file according to the split points specified by the
@var{segment_times} option:
As the example above, but segment the input file according to the split
points specified by the @var{segment_times} option:
@example
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut
@end example

@item
Use the @command{ffmpeg} @option{force_key_frames}
As the example above, but use the @command{ffmpeg} @option{force_key_frames}
option to force key frames in the input at the specified location, together
with the segment option @option{segment_time_delta} to account for
possible roundings operated when setting key frame times.
@@ -996,7 +934,7 @@ ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_fr
@end example

@item
Convert the @file{in.mkv} to TS segments using the @code{libx264}
To convert the @file{in.mkv} to TS segments using the @code{libx264}
and @code{libfaac} encoders:
@example
ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts
@@ -1011,28 +949,6 @@ ffmpeg -re -i in.mkv -codec copy -map 0 -f segment -segment_list playlist.m3u8 \
@end example
@end itemize

@section smoothstreaming

Smooth Streaming muxer generates a set of files (Manifest, chunks) suitable for serving with conventional web server.

@table @option
@item window_size
Specify the number of fragments kept in the manifest. Default 0 (keep all).

@item extra_window_size
Specify the number of fragments kept outside of the manifest before removing from disk. Default 5.

@item lookahead_count
Specify the number of lookahead fragments. Default 2.

@item min_frag_duration
Specify the minimum fragment duration (in microseconds). Default 5000000.

@item remove_at_exit
Specify whether to remove all fragments when finished. Default 0 (do not remove).

@end table
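
A possible invocation, assuming the output argument names the directory into which the Manifest and chunk files are written (directory, codecs and options here are illustrative only):
@example
ffmpeg -re -i in.mkv -codec:v libx264 -codec:a libfaac -f smoothstreaming -window_size 10 /var/www/smooth
@end example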

@section tee

The tee muxer can be used to write the same data to several files or any
@@ -1070,7 +986,7 @@ It is possible to specify to which streams a given bitstream filter
applies, by appending a stream specifier to the option separated by
@code{/}. @var{spec} must be a stream specifier (see @ref{Format
stream specifiers}).  If the stream specifier is not specified, the
bitstream filters will be applied to all streams in the output.
bistream filters will be applied to all streams in the output.

Several bitstream filters can be specified, separated by ",".
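
For example, a bitstream filter could be restricted to the video streams of a single output, roughly as follows (output names hypothetical, assuming the @code{h264_mp4toannexb} filter is available):
@example
ffmpeg -i input.mkv -map 0 -c copy -f tee "[f=mpegts:bsfs/v=h264_mp4toannexb]out.ts|out.mkv"
@end example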

@@ -1117,34 +1033,4 @@ Note: some codecs may need different options depending on the output format;
the auto-detection of this can not work with the tee muxer. The main example
is the @option{global_header} flag.

@section webm_dash_manifest

WebM DASH Manifest muxer.

This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.

@subsection Options

This muxer supports the following options:

@table @option
@item adaptation_sets
This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
audio and video streams. Any number of adaptation sets can be added using this option.
@end table

@subsection Example
@example
ffmpeg -f webm_dash_manifest -i video1.webm \
       -f webm_dash_manifest -i video2.webm \
       -f webm_dash_manifest -i audio1.webm \
       -f webm_dash_manifest -i audio2.webm \
       -map 0 -map 1 -map 2 -map 3 \
       -c copy \
       -f webm_dash_manifest \
       -adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \
       manifest.xml
@end example

@c man end MUXERS

doc/nut.texi | 21

@@ -21,27 +21,6 @@ The official nut specification is at svn://svn.mplayerhq.hu/nut
In case of any differences between this text and the official specification,
the official specification shall prevail.

@chapter Modes
NUT has some variants signaled by using the flags field in its main header.

@multitable @columnfractions .4 .4
@item BROADCAST   @tab Extend the syncpoint to report the sender wallclock
@item PIPE        @tab Omit completely the syncpoint
@end multitable

@section BROADCAST

The BROADCAST variant provides a secondary time reference to facilitate
detecting endpoint latency and network delays.
It assumes all the endpoint clocks are synchronized.
To be used in real-time scenarios.

@section PIPE

The PIPE variant assumes NUT is used as non-seekable intermediate container,
by not using syncpoint removes unneeded overhead and reduces the overall
memory usage.
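
On the muxer side these variants map onto the @option{-syncpoints} flags documented for the nut muxer; a hedged sketch of producing a BROADCAST-style stream with timestamped syncpoints:
@example
ffmpeg -i INPUT -f_strict experimental -syncpoints timestamped out.nut
@end example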

@chapter Container-specific codec tags

@section Generic raw YUVA formats

@@ -79,6 +79,9 @@ qpel{8,16}_mc??_old_c / *pixels{8,16}_l4
    Just used to work around a bug in an old libavcodec encoder version.
    Don't optimize them.

tpel_mc_func {put,avg}_tpel_pixels_tab
    Used only for SVQ3, so only optimize them if you need fast SVQ3 decoding.

add_bytes/diff_bytes
    For huffyuv only, optimize if you want a faster ffhuffyuv codec.

@@ -136,6 +139,9 @@ dct_unquantize_mpeg2
dct_unquantize_h263
    Used in MPEG-4/H.263 en/decoding.

FIXME remaining functions?
BTW, most of these functions are in dsputil.c/.h, some are in mpegvideo.c/.h.


Alignment:
@@ -262,6 +268,17 @@ CELL/SPU:
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/30B3520C93F437AB87257060006FFE5E/$file/Language_Extensions_for_CBEA_2.4.pdf
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/9F820A5FFA3ECE8C8725716A0062585F/$file/CBE_Handbook_v1.1_24APR2007_pub.pdf

SPARC-specific:
---------------
SPARC Joint Programming Specification (JPS1): Commonality
http://www.fujitsu.com/downloads/PRMPWR/JPS1-R1.0.4-Common-pub.pdf

UltraSPARC III Processor User's Manual (contains instruction timings)
http://www.sun.com/processors/manuals/USIIIv2.pdf

VIS Whitepaper (contains optimization guidelines)
http://www.sun.com/processors/vis/download/vis/vis_whitepaper.pdf

GCC asm links:
--------------
official doc but quite ugly

@@ -13,8 +13,8 @@ You can disable all the output devices using the configure option
option "--enable-outdev=@var{OUTDEV}", or you can disable a particular
input device using the option "--disable-outdev=@var{OUTDEV}".

The option "-devices" of the ff* tools will display the list of
enabled output devices.
The option "-formats" of the ff* tools will display the list of
enabled output devices (amongst the muxers).

A description of the currently available output devices follows.

@@ -220,11 +220,11 @@ This output device allows one to render to OpenGL context.
Context may be provided by application or default SDL window is created.

When device renders to external context, application must implement handlers for following messages:
@code{AV_DEV_TO_APP_CREATE_WINDOW_BUFFER} - create OpenGL context on current thread.
@code{AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER} - make OpenGL context current.
@code{AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER} - swap buffers.
@code{AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER} - destroy OpenGL context.
Application is also required to inform a device about current resolution by sending @code{AV_APP_TO_DEV_WINDOW_SIZE} message.
@code{AV_CTL_MESSAGE_CREATE_WINDOW_BUFFER} - create OpenGL context on current thread.
@code{AV_CTL_MESSAGE_PREPARE_WINDOW_BUFFER} - make OpenGL context current.
@code{AV_CTL_MESSAGE_DISPLAY_WINDOW_BUFFER} - swap buffers.
@code{AV_CTL_MESSAGE_DESTROY_WINDOW_BUFFER} - destroy OpenGL context.
Application is also required to inform a device about current resolution by sending @code{AV_DEVICE_WINDOW_RESIZED} message.

@subsection Options
@table @option
@@ -237,10 +237,6 @@ Application must provide OpenGL context and both @code{window_size_cb} and @code
@item window_title
Set the SDL window title, if not specified default to the filename specified for the output device.
Ignored when @option{no_window} is set.
@item window_size
Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
Mostly usable when @option{no_window} is not set.

@end table
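
When no external context is supplied, the device simply renders into its default SDL window; a minimal sketch using the options above (assuming they can be passed as regular output options; input name and size are placeholders):
@example
ffmpeg -i INPUT -window_size 640x480 -f opengl "FFmpeg OpenGL preview"
@end example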

@@ -294,20 +290,6 @@ When both options are provided then the highest value is used
are set to 0 (which is default), the device will use the default
PulseAudio duration value. By default PulseAudio set buffer duration
to around 2 seconds.

@item prebuf
Specify pre-buffering size in bytes. The server does not start with
playback before at least @option{prebuf} bytes are available in the
buffer. By default this option is initialized to the same value as
@option{buffer_size} or @option{buffer_duration} (whichever is bigger).

@item minreq
Specify minimum request size in bytes. The server does not request less
than @option{minreq} bytes from the client, instead waits until the buffer
is free enough to request more bytes at once. It is recommended to not set
this option, which will initialize this to a value that is deemed sensible
by the server.

@end table

@subsection Examples
@@ -406,26 +388,19 @@ For example, @code{dual-headed:0.1} would specify screen 1 of display
Check the X11 specification for more detailed information about the
display name format.

@item window_id
When set to non-zero value then device doesn't create new window,
but uses existing one with provided @var{window_id}. By default
this options is set to zero and device creates its own window.

@item window_size
Set the created window size, can be a string of the form
@var{width}x@var{height} or a video size abbreviation. If not
specified it defaults to the size of the input video.
Ignored when @var{window_id} is set.

@item window_x
@item window_y
Set the X and Y window offsets for the created window. They are both
set to 0 by default. The values may be ignored by the window manager.
Ignored when @var{window_id} is set.

@item window_title
Set the window title, if not specified default to the filename
specified for the output device. Ignored when @var{window_id} is set.
specified for the output device.
@end table

For more information about XVideo see @url{http://www.x.org/}.
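
A hedged sketch of displaying a file through XVideo with the window options above (display name, size and title are placeholders, assuming the options can be passed as regular output options):
@example
ffmpeg -re -i INPUT -window_size 640x480 -window_title "FFmpeg XV preview" -f xv :0.0
@end example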

@@ -24,20 +24,6 @@ If not, then you should install a different compiler that has no
hard-coded path to gas. In the worst case pass @code{--disable-asm}
to configure.

@section Advanced linking configuration

If you compiled FFmpeg libraries statically and you want to use them to
build your own shared library, you may need to force PIC support (with
@code{--enable-pic} during FFmpeg configure) and add the following option
to your project LDFLAGS:

@example
-Wl,-Bsymbolic
@end example

If your target platform requires position independent binaries, you should
pass the correct linking flag (e.g. @code{-pie}) to @code{--extra-ldexeflags}.
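
A sketch of the complete flow for a project linking the static FFmpeg libraries into its own shared object (paths and library names are illustrative only):
@example
./configure --enable-pic --enable-static --disable-shared
make
gcc -shared -o libmywrapper.so mywrapper.o -Wl,-Bsymbolic \
    -L/path/to/ffmpeg/lib -lavformat -lavcodec -lavutil
@end example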

@section BSD

BSD make will not build FFmpeg, you need to install and use GNU Make
@@ -66,14 +52,14 @@ unacelerated code.

Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
@url{https://github.com/FFmpeg/gas-preprocessor} or
@url{https://github.com/yuvi/gas-preprocessor}(currently outdated) to build the optimized
assembly functions. Put the Perl script somewhere
@url{http://github.com/yuvi/gas-preprocessor} to build the optimized
assembler functions. Put the Perl script somewhere
in your PATH, FFmpeg's configure will pick it up automatically.

Mac OS X on amd64 and x86 requires @command{yasm} to build most of the
optimized assembly functions. @uref{http://www.finkproject.org/, Fink},
optimized assembler functions. @uref{http://www.finkproject.org/, Fink},
@uref{http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml, Gentoo Prefix},
@uref{https://mxcl.github.com/homebrew/, Homebrew}
@uref{http://mxcl.github.com/homebrew/, Homebrew}
or @uref{http://www.macports.org, MacPorts} can easily provide it.

@@ -278,7 +264,7 @@ llrint() in its C library.
Install your Cygwin with all the "Base" packages, plus the
following "Devel" ones:
@example
binutils, gcc4-core, make, git, mingw-runtime, texinfo
binutils, gcc4-core, make, git, mingw-runtime, texi2html
@end example

In order to run FATE you will also need the following "Utils" packages:

@@ -166,7 +166,7 @@ This protocol accepts the following options.

@table @option
@item timeout
Set timeout in microseconds of socket I/O operations used by the underlying low level
Set timeout of socket I/O operations used by the underlying low level
operation. By default it is set to -1, which means that the timeout is
not specified.

@@ -244,7 +244,7 @@ Override the User-Agent header. If not specified the protocol will use a
string describing the libavformat build. ("Lavf/<version>")

@item timeout
Set timeout in microseconds of socket I/O operations used by the underlying low level
Set timeout of socket I/O operations used by the underlying low level
operation. By default it is set to -1, which means that the timeout is
not specified.

@@ -255,7 +255,7 @@ Export the MIME type.
If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
supports this, the metadata has to be retrieved by the application by reading
the @option{icy_metadata_headers} and @option{icy_metadata_packet} options.
The default is 1.
The default is 0.

@item icy_metadata_headers
If the server supports ICY metadata, this contains the ICY-specific HTTP reply
@@ -293,50 +293,6 @@ The required syntax to play a stream specifying a cookie is:
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
@end example

@section Icecast

Icecast protocol (stream to Icecast servers)

This protocol accepts the following options:

@table @option
@item ice_genre
Set the stream genre.

@item ice_name
Set the stream name.

@item ice_description
Set the stream description.

@item ice_url
Set the stream website URL.

@item ice_public
Set if the stream should be public.
The default is 0 (not public).

@item user_agent
Override the User-Agent header. If not specified a string of the form
"Lavf/<version>" will be used.

@item password
Set the Icecast mountpoint password.

@item content_type
Set the stream content type. This must be set if it is different from
audio/mpeg.

@item legacy_icecast
This enables support for Icecast versions < 2.4.0, that do not support the
HTTP PUT method but the SOURCE method.

@end table

@example
icecast://[@var{username}[:@var{password}]@@]@var{server}:@var{port}/@var{mountpoint}
@end example
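
A hedged example of streaming to such a mountpoint (server, credentials and mountpoint are placeholders; the usual @code{source} user is assumed):
@example
ffmpeg -re -i in.mkv -codec:a libmp3lame -f mp3 icecast://source:hackme@@localhost:8000/stream
@end example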

@section mmst

MMS (Microsoft Media Server) protocol over TCP.
@@ -581,35 +537,6 @@ The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
for streaming multimedia content within HTTPS requests to traverse
firewalls.

@section libsmbclient

libsmbclient permits one to manipulate CIFS/SMB network resources.

Following syntax is required.

@example
smb://[[domain:]user[:password@@]]server[/share[/path[/file]]]
@end example

This protocol accepts the following options.

@table @option
@item timeout
Set timeout in milliseconds of socket I/O operations used by the underlying
low level operation. By default it is set to -1, which means that the timeout
is not specified.

@item truncate
Truncate existing files on write, if set to 1. A value of 0 prevents
truncating. Default value is 1.

@item workgroup
Set the workgroup used for making connections. By default workgroup is not specified.

@end table

For more information see: @url{http://www.samba.org/}.
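
For instance, a file on a share could be read directly (a sketch; server, share, credentials and path are placeholders):
@example
ffplay smb://user:password@@server/share/video.mkv
@end example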

@section libssh

Secure File Transfer Protocol via libssh
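
A hedged sketch of reading a remote file over SFTP (host, credentials and path are placeholders):
@example
ffmpeg -i sftp://user:password@@server/home/user/input.mkv -codec copy output.mkv
@end example
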
@@ -764,7 +691,7 @@ data transferred over RDT).

The muxer can be used to send a stream using RTSP ANNOUNCE to a server
supporting it (currently Darwin Streaming Server and Mischa Spiegelmock's
@uref{https://github.com/revmischa/rtsp-server, RTSP server}).
@uref{http://github.com/revmischa/rtsp-server, RTSP server}).

The required syntax for a RTSP url is:
@example
@@ -783,7 +710,7 @@ Do not start playing the stream immediately if set to 1. Default value
is 0.

@item rtsp_transport
Set RTSP transport protocols.
Set RTSP trasport protocols.

It accepts the following values:
@table @samp
@@ -815,8 +742,6 @@ The following values are accepted:
Accept packets only from negotiated peer address and port.
@item listen
Act as a server, listening for an incoming connection.
@item prefer_tcp
Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
@end table

Default value is @samp{none}.
@@ -842,17 +767,17 @@ Set maximum local UDP port. Default value is 65000.
@item timeout
Set maximum timeout (in seconds) to wait for incoming connections.

A value of -1 means infinite (default). This option implies the
A value of -1 mean infinite (default). This option implies the
@option{rtsp_flags} set to @samp{listen}.

@item reorder_queue_size
Set number of packets to buffer for handling of reordered packets.

@item stimeout
Set socket TCP I/O timeout in microseconds.
Set socket TCP I/O timeout in micro seconds.

@item user-agent
Override User-Agent header. If not specified, it defaults to the
Override User-Agent header. If not specified, it default to the
libavformat identifier string.
@end table

@@ -1033,30 +958,6 @@ this binary block are used as master key, the following 14 bytes are
used as master salt.
@end table

@section subfile

Virtually extract a segment of a file or another stream.
The underlying stream must be seekable.

Accepted options:
@table @option
@item start
Start offset of the extracted segment, in bytes.
@item end
End offset of the extracted segment, in bytes.
@end table

Examples:

Extract a chapter from a DVD VOB file (start and end sectors obtained
externally and multiplied by 2048):
@example
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
@end example

Play an AVI file directly from a TAR archive:
subfile,,start,183241728,end,366490624,,:archive.tar

@section tcp

Transmission Control Protocol.
@@ -1081,8 +982,8 @@ Set raise error timeout, expressed in microseconds.
This option is only relevant in read mode: if no data arrived in more
than this time interval, raise error.

@item listen_timeout=@var{milliseconds}
Set listen timeout, expressed in milliseconds.
@item listen_timeout=@var{microseconds}
Set listen timeout, expressed in microseconds.
@end table

The following example shows how to setup a listening TCP connection
@@ -1173,9 +1074,8 @@ The list of supported options follows.

@table @option
@item buffer_size=@var{size}
Set the UDP maximum socket buffer size in bytes. This is used to set either
the receive or send buffer size, depending on what the socket is used for.
Default is 64KB.  See also @var{fifo_size}.
Set the UDP socket buffer size in bytes. This is used both for the
receiving and the sending buffer size.

@item localport=@var{port}
Override the local UDP port to bind with.
@@ -1226,12 +1126,6 @@ Set raise error timeout, expressed in microseconds.

This option is only relevant in read mode: if no data arrived in more
than this time interval, raise error.

@item broadcast=@var{1|0}
Explicitly allow or disallow UDP broadcasting.

Note that broadcasting may not work properly on networks having
a broadcast storm protection.
@end table

@subsection Examples

@@ -112,14 +112,6 @@ bayer dither

@item ed
error diffusion dither

@item a_dither
arithmetic dither, based using addition

@item x_dither
arithmetic dither, based using xor (more random/less apparent patterning than
a_dither).

@end table

@end table

@@ -618,6 +618,7 @@ flip wavelet?
try to use the wavelet transformed predicted image (motion compensated image) as context for coding the residual coefficients
try the MV length as context for coding the residual coefficients
use extradata for stuff which is in the keyframes now?
the MV median predictor is patented IIRC
implement per picture halfpel interpolation
try different range coder state transition tables for different contexts

doc/style.min.css (vendored) | 23
File diff suppressed because one or more lines are too long

doc/t2h.init | 59

@@ -1,35 +1,26 @@
# Init file for texi2html.

# This is deprecated, and the makeinfo/texi2any version is doc/t2h.pm

# no horiz rules between sections
$end_section = \&FFmpeg_end_section;
sub FFmpeg_end_section($$)
{
}

my $TEMPLATE_HEADER1 = $ENV{"FFMPEG_HEADER1"} || <<EOT;
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <title>FFmpeg documentation</title>
    <link rel="stylesheet" href="bootstrap.min.css" />
    <link rel="stylesheet" href="style.min.css" />
$EXTRA_HEAD =
'<link rel="icon" href="favicon.png" type="image/png" />
';

$CSS_LINES = $ENV{"FFMPEG_CSS"} || <<EOT;
<link rel="stylesheet" type="text/css" href="default.css" />
EOT

my $TEMPLATE_HEADER2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
  </head>
  <body>
    <div style="width: 95%; margin: auto">
my $TEMPLATE_HEADER = $ENV{"FFMPEG_HEADER"} || <<EOT;
<link rel="icon" href="favicon.png" type="image/png" />
</head>
<body>
<div id="container">
<div id="body">
EOT

my $TEMPLATE_FOOTER = $ENV{"FFMPEG_FOOTER"} || <<EOT;
    </div>
  </body>
</html>
EOT
$PRE_BODY_CLOSE = '</div></div>';

$SMALL_RULE = '';
$BODYTEXT = '';
@@ -91,25 +82,21 @@ sub FFmpeg_print_page_head($$)
    $longtitle = "FFmpeg documentation : " . $longtitle;

    print $fh <<EOT;
$TEMPLATE_HEADER1
$description
<meta name="keywords" content="$longtitle">
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
<!DOCTYPE html>
<html>
$Texi2HTML::THISDOC{'copying'}<!-- Created on $Texi2HTML::THISDOC{today} by $Texi2HTML::THISDOC{program} -->
<!--
$Texi2HTML::THISDOC{program_authors}
-->
$encoding
$TEMPLATE_HEADER2
EOT
}
<head>
<title>$longtitle</title>

$print_page_foot = \&FFmpeg_print_page_foot;
sub FFmpeg_print_page_foot($$)
{
    my $fh = shift;
    print $fh <<EOT;
$TEMPLATE_FOOTER
$description
<meta name="keywords" content="$longtitle">
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
$encoding
$CSS_LINES
$TEMPLATE_HEADER
EOT
}
221  doc/t2h.pm
@@ -1,221 +0,0 @@
 | 
			
		||||
# makeinfo HTML output init file
 | 
			
		||||
#
 | 
			
		||||
# Copyright (c) 2011, 2012 Free Software Foundation, Inc.
 | 
			
		||||
# Copyright (c) 2014 Andreas Cadhalpun
 | 
			
		||||
# Copyright (c) 2014 Tiancheng "Timothy" Gu
 | 
			
		||||
#
 | 
			
		||||
# This file is part of FFmpeg.
 | 
			
		||||
#
 | 
			
		||||
# FFmpeg is free software; you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU General Public License as published by
 | 
			
		||||
# the Free Software Foundation; either version 3 of the License, or
 | 
			
		||||
# (at your option) any later version.
 | 
			
		||||
#
 | 
			
		||||
# FFmpeg is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 | 
			
		||||
# General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU General Public
 | 
			
		||||
# License along with FFmpeg; if not, write to the Free Software
 | 
			
		||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 | 
			
		||||
 | 
			
		||||
# no navigation elements
 | 
			
		||||
set_from_init_file('HEADERS', 0);
 | 
			
		||||
 | 
			
		||||
# TOC and Chapter headings link
 | 
			
		||||
set_from_init_file('TOC_LINKS', 1);
 | 
			
		||||
 | 
			
		||||
# print the TOC where @contents is used
 | 
			
		||||
set_from_init_file('INLINE_CONTENTS', 1);
 | 
			
		||||
 | 
			
		||||
# make chapters <h2>
 | 
			
		||||
set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
 | 
			
		||||
 | 
			
		||||
# Do not add <hr>
 | 
			
		||||
set_from_init_file('DEFAULT_RULE', '');
 | 
			
		||||
set_from_init_file('BIG_RULE', '');
 | 
			
		||||
 | 
			
		||||
# Customized file beginning
 | 
			
		||||
sub ffmpeg_begin_file($$$)
 | 
			
		||||
{
 | 
			
		||||
    my $self = shift;
 | 
			
		||||
    my $filename = shift;
 | 
			
		||||
    my $element = shift;
 | 
			
		||||
 | 
			
		||||
    my $command;
 | 
			
		||||
    if ($element and $self->get_conf('SPLIT')) {
 | 
			
		||||
        $command = $self->element_command($element);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    my ($title, $description, $encoding, $date, $css_lines,
 | 
			
		||||
        $doctype, $bodytext, $copying_comment, $after_body_open,
 | 
			
		||||
        $extra_head, $program_and_version, $program_homepage,
 | 
			
		||||
        $program, $generator) = $self->_file_header_informations($command);
 | 
			
		||||
 | 
			
		||||
    my $links = $self->_get_links ($filename, $element);
 | 
			
		||||
 | 
			
		||||
    my $head1 = $ENV{"FFMPEG_HEADER1"} || <<EOT;
 | 
			
		||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
 | 
			
		||||
<html>
 | 
			
		||||
<!-- Created by $program_and_version, $program_homepage -->
 | 
			
		||||
  <head>
 | 
			
		||||
    <meta charset="utf-8">
 | 
			
		||||
    <title>
 | 
			
		||||
EOT
 | 
			
		||||
    my $head_title = <<EOT;
 | 
			
		||||
      $title
 | 
			
		||||
EOT
 | 
			
		||||
 | 
			
		||||
    my $head2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
 | 
			
		||||
    </title>
 | 
			
		||||
    <link rel="stylesheet" type="text/css" href="bootstrap.min.css">
 | 
			
		||||
    <link rel="stylesheet" type="text/css" href="style.min.css">
 | 
			
		||||
  </head>
 | 
			
		||||
  <body>
 | 
			
		||||
    <div style="width: 95%; margin: auto">
 | 
			
		||||
      <h1>
 | 
			
		||||
EOT
 | 
			
		||||
 | 
			
		||||
    my $head3 = $ENV{"FFMPEG_HEADER3"} || <<EOT;
 | 
			
		||||
      </h1>
 | 
			
		||||
EOT
 | 
			
		||||
 | 
			
		||||
    return $head1 . $head_title . $head2 . $head_title . $head3;
 | 
			
		||||
}
 | 
			
		||||
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
 | 
			
		||||
 | 
			
		||||
# Customized file ending
 | 
			
		||||
sub ffmpeg_end_file($)
 | 
			
		||||
{
 | 
			
		||||
    my $self = shift;
 | 
			
		||||
    my $program_string = &{$self->{'format_program_string'}}($self);
 | 
			
		||||
    my $program_text = <<EOT;
 | 
			
		||||
      <p style="font-size: small;">
 | 
			
		||||
        $program_string
 | 
			
		||||
      </p>
 | 
			
		||||
EOT
 | 
			
		||||
    my $footer = $ENV{FFMPEG_FOOTER} || <<EOT;
 | 
			
		||||
    </div>
 | 
			
		||||
  </body>
 | 
			
		||||
</html>
 | 
			
		||||
EOT
 | 
			
		||||
    return $program_text . $footer;
 | 
			
		||||
}
 | 
			
		||||
texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);
 | 
			
		||||
 | 
			
		||||
# Dummy title command
 | 
			
		||||
# Ignore title. Title is handled through ffmpeg_begin_file().
 | 
			
		||||
set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
 | 
			
		||||
sub ffmpeg_title($$$$)
 | 
			
		||||
{
 | 
			
		||||
    return '';
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
texinfo_register_command_formatting('titlefont',
 | 
			
		||||
                                    \&ffmpeg_title);
 | 
			
		||||
 | 
			
		||||
# Customized float command. Part of code borrowed from GNU Texinfo.
 | 
			
		||||
sub ffmpeg_float($$$$$)
 | 
			
		||||
{
 | 
			
		||||
    my $self = shift;
 | 
			
		||||
    my $cmdname = shift;
 | 
			
		||||
    my $command = shift;
 | 
			
		||||
    my $args = shift;
 | 
			
		||||
    my $content = shift;
 | 
			
		||||
 | 
			
		||||
    my ($caption, $prepended) = Texinfo::Common::float_name_caption($self,
 | 
			
		||||
                                                                $command);
 | 
			
		||||
    my $caption_text = '';
 | 
			
		||||
    my $prepended_text;
 | 
			
		||||
    my $prepended_save = '';
 | 
			
		||||
 | 
			
		||||
    if ($self->in_string()) {
 | 
			
		||||
        if ($prepended) {
 | 
			
		||||
            $prepended_text = $self->convert_tree_new_formatting_context(
 | 
			
		||||
                $prepended, 'float prepended');
 | 
			
		||||
        } else {
 | 
			
		||||
            $prepended_text = '';
 | 
			
		||||
        }
 | 
			
		||||
        if ($caption) {
 | 
			
		||||
            $caption_text = $self->convert_tree_new_formatting_context(
 | 
			
		||||
                {'contents' => $caption->{'args'}->[0]->{'contents'}},
 | 
			
		||||
                'float caption');
 | 
			
		||||
        }
 | 
			
		||||
        return $prepended.$content.$caption_text;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    my $id = $self->command_id($command);
 | 
			
		||||
    my $label;
 | 
			
		||||
    if (defined($id) and $id ne '') {
 | 
			
		||||
        $label = "<a name=\"$id\"></a>";
 | 
			
		||||
    } else {
 | 
			
		||||
        $label = '';
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if ($prepended) {
 | 
			
		||||
        if ($caption) {
 | 
			
		||||
            # prepend the prepended tree to the first paragraph
 | 
			
		||||
            my @caption_original_contents = @{$caption->{'args'}->[0]->{'contents'}};
 | 
			
		||||
            my @caption_contents;
 | 
			
		||||
            my $new_paragraph;
 | 
			
		||||
            while (@caption_original_contents) {
 | 
			
		||||
                my $content = shift @caption_original_contents;
 | 
			
		||||
                if ($content->{'type'} and $content->{'type'} eq 'paragraph') {
 | 
			
		||||
                    %{$new_paragraph} = %{$content};
 | 
			
		||||
                    $new_paragraph->{'contents'} = [@{$content->{'contents'}}];
 | 
			
		||||
                    unshift (@{$new_paragraph->{'contents'}}, {'cmdname' => 'strong',
 | 
			
		||||
                             'args' => [{'type' => 'brace_command_arg',
 | 
			
		||||
                                                    'contents' => [$prepended]}]});
 | 
			
		||||
                    push @caption_contents, $new_paragraph;
 | 
			
		||||
                    last;
 | 
			
		||||
                } else {
 | 
			
		||||
                    push @caption_contents, $content;
 | 
			
		||||
                }
 | 
			
		||||
            }
 | 
			
		||||
            push @caption_contents, @caption_original_contents;
 | 
			
		||||
            if ($new_paragraph) {
 | 
			
		||||
                $caption_text = $self->convert_tree_new_formatting_context(
 | 
			
		||||
                 {'contents' => \@caption_contents}, 'float caption');
 | 
			
		||||
                $prepended_text = '';
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
        if ($caption_text eq '') {
 | 
			
		||||
            $prepended_text = $self->convert_tree_new_formatting_context(
 | 
			
		||||
                $prepended, 'float prepended');
 | 
			
		||||
            if ($prepended_text ne '') {
 | 
			
		||||
                $prepended_save = $prepended_text;
 | 
			
		||||
                $prepended_text = '<p><strong>'.$prepended_text.'</strong></p>';
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
    } else {
 | 
			
		||||
        $prepended_text = '';
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if ($caption and $caption_text eq '') {
 | 
			
		||||
        $caption_text = $self->convert_tree_new_formatting_context(
 | 
			
		||||
            $caption->{'args'}->[0], 'float caption');
 | 
			
		||||
    }
 | 
			
		||||
    if ($prepended_text.$caption_text ne '') {
 | 
			
		||||
        $prepended_text = $self->_attribute_class('div','float-caption'). '>'
 | 
			
		||||
                . $prepended_text;
 | 
			
		||||
        $caption_text .= '</div>';
 | 
			
		||||
    }
 | 
			
		||||
    my $html_class = '';
 | 
			
		||||
    if ($prepended_save =~ /NOTE/) {
 | 
			
		||||
        $html_class = 'info';
 | 
			
		||||
        $prepended_text = '';
 | 
			
		||||
        $caption_text   = '';
 | 
			
		||||
    } elsif ($prepended_save =~ /IMPORTANT/) {
 | 
			
		||||
        $html_class = 'warning';
 | 
			
		||||
        $prepended_text = '';
 | 
			
		||||
        $caption_text   = '';
 | 
			
		||||
    }
 | 
			
		||||
    return $self->_attribute_class('div', $html_class). '>' . "\n" .
 | 
			
		||||
        $prepended_text . $caption_text . $content . '</div>';
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
texinfo_register_command_formatting('float',
 | 
			
		||||
                                    \&ffmpeg_float);
 | 
			
		||||
 | 
			
		||||
1;
 | 
			
		||||
@@ -282,14 +282,6 @@ INF: while(<$inf>) {
        $_ = "\n=over 4\n";
    };

    /^\@(multitable)\s+{.*/ and do {
        push @endwstack, $endw;
        push @icstack, $ic;
        $endw = $1;
        $ic = "";
        $_ = "\n=over 4\n";
    };

    /^\@((?:small)?example|display)/ and do {
        push @endwstack, $endw;
        $endw = $1;
@@ -306,10 +298,10 @@ INF: while(<$inf>) {

    /^\@tab\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
        my $columns = $1;
        $columns =~ s/\@tab//;
        $columns =~ s/\@tab/ : /;

        $_ = $columns;
        $chapter =~ s/$//;
        $_ = " : ". $columns;
        $chapter =~ s/\n+\s+$//;
    };

    /^\@itemx?\s*(.+)?$/ and do {
@@ -332,12 +324,12 @@ $inf = pop @instack;

die "No filename or title\n" unless defined $fn && defined $tl;

# always use utf8
print "=encoding utf8\n\n";

$chapters{NAME} = "$fn \- $tl\n";
$chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES};

# always use utf8
print "=encoding utf8\n\n";

unshift @chapters_sequence, "NAME";
for $chapter (@chapters_sequence) {
    if (exists $chapters{$chapter}) {

@@ -782,9 +782,6 @@ large numbers (usually 2^53 and larger).
Round the value of expression @var{expr} upwards to the nearest
integer. For example, "ceil(1.5)" is "2.0".

@item clip(x, min, max)
Return the value of @var{x} clipped between @var{min} and @var{max}.

@item cos(x)
Compute cosine of @var{x}.

@@ -844,7 +841,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
Return 1.0 if @var{x} is NAN, 0.0 otherwise.

@item ld(var)
Load the value of the internal variable with number
Allow to load the value of the internal variable with number
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
The function returns the loaded value.

@@ -912,7 +909,7 @@ Compute the square root of @var{expr}. This is equivalent to
Compute expression @code{1/(1 + exp(4*x))}.

@item st(var, expr)
Store the value of the expression @var{expr} in an internal
Allow to store the value of the expression @var{expr} in an internal
variable. @var{var} specifies the number of the variable where to
store the value, and it is a value ranging from 0 to 9. The function
returns the value stored in the internal variable.

@@ -1034,7 +1031,7 @@ indication of the corresponding powers of 10 and of 2.
10^24 / 2^70
@end table

@c man end EXPRESSION EVALUATION
@c man end

@chapter OpenCL Options
@c man begin OPENCL OPTIONS

@@ -1,424 +0,0 @@
This document is a tutorial/initiation for writing simple filters in
libavfilter.

Foreword: just like everything else in FFmpeg, libavfilter is monolithic, which
means that it is highly recommended that you submit your filters to the FFmpeg
development mailing-list and make sure they are applied. Otherwise, your filter
is likely to have a very short lifetime due to more or less regular internal
API changes, and a limited distribution, review, and testing.

Bootstrap
=========

Let's say you want to write a new simple video filter called "foobar" which
takes one frame as input, changes the pixels in whatever fashion you fancy, and
outputs the modified frame. The simplest way of doing this is to take a
similar filter.  We'll pick edgedetect, but any other should do. You can look
for others using the `./ffmpeg -v 0 -filters|grep ' V->V '` command.

 - cp libavfilter/vf_{edgedetect,foobar}.c
 - sed -i s/edgedetect/foobar/g -i libavfilter/vf_foobar.c
 - sed -i s/EdgeDetect/Foobar/g -i libavfilter/vf_foobar.c
 - edit libavfilter/Makefile, and add an entry for "foobar" following the
   pattern of the other filters.
 - edit libavfilter/allfilters.c, and add an entry for "foobar" following the
   pattern of the other filters.
 - ./configure ...
 - make -j<whatever> ffmpeg
 - ./ffmpeg -i tests/lena.pnm -vf foobar foobar.png

If everything went right, you should get a foobar.png with Lena edge-detected.

That's it, your new playground is ready.

Some little details about what's going on:
libavfilter/allfilters.c:avfilter_register_all() is called at runtime to create
a list of the available filters, but it's important to know that this file is
also parsed by the configure script, which in turn will define variables for
the build system and the C code:

    --- after running configure ---

    $ grep FOOBAR config.mak
    CONFIG_FOOBAR_FILTER=yes
    $ grep FOOBAR config.h
    #define CONFIG_FOOBAR_FILTER 1

CONFIG_FOOBAR_FILTER=yes from config.mak is later used to enable the filter in
libavfilter/Makefile, and CONFIG_FOOBAR_FILTER=1 from config.h will be used
for registering the filter in libavfilter/allfilters.c.

Filter code layout
==================

You now need some theory about the general code layout of a filter. Open your
libavfilter/vf_foobar.c. This section will detail the important parts of the
code you need to understand before messing with it.

Copyright
---------

The first chunk is the copyright. Most filters are LGPL, and we are assuming
vf_foobar is as well. We are also assuming vf_foobar is not an edge detector
filter, so you can update the boilerplate with your credits.

Doxy
----

The next chunk is the Doxygen about the file. See http://ffmpeg.org/doxygen/trunk/.
Detail here what the filter is and does, and add some references if you feel
like it.

Context
-------

Skip the headers and scroll down to the definition of FoobarContext. This is
your local state context. It is already filled with 0 when you get it, so do
not worry about uninitialized reads into this context. This is where you put
all the "global" information you need, typically the variables storing the
user options. You'll notice the first field "const AVClass *class"; it's the
only field you need to keep, assuming you have a context. There is some magic
you don't care about around this field, just let it be (in first position) for
now.
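
To make this concrete, a minimal context for our hypothetical foobar filter
could look like the sketch below (the low/high fields and the scratch buffer
are invented for illustration; only the AVClass pointer is required):

    typedef struct FoobarContext {
        const AVClass *class; // must stay first, used by the AVOption machinery
        double low, high;     // hypothetical user options, filled by the parser
        uint8_t *tmpbuf;      // example buffer, allocated later in config_props()
    } FoobarContext;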
Options
-------

Then comes the options array. This is what defines the user-accessible
options. For example, -vf foobar=mode=colormix:high=0.4:low=0.1. Most options
have the following pattern:
  name, description, offset, type, default value, minimum value, maximum value, flags

 - name is the option name, keep it simple and lowercase
 - descriptions are short, in lowercase, without a period, and describe what
   the option does, for example "set the foo of the bar"
 - offset is the offset of the field in your local context, see the OFFSET()
   macro; the option parser will use that information to fill the fields
   according to the user input
 - type is any of AV_OPT_TYPE_* defined in libavutil/opt.h
 - default value is a union where you pick the appropriate type; "{.dbl=0.3}",
   "{.i64=0x234}", "{.str=NULL}", ...
 - min and max values define the range of available values, inclusive
 - flags are AVOption generic flags. See AV_OPT_FLAG_* definitions

When in doubt, just look at the other AVOption definitions all around the
codebase, there are tons of examples.
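
As an illustration only (the option names, defaults and ranges are invented for
the hypothetical foobar filter), such an array typically looks like this:

    #define OFFSET(x) offsetof(FoobarContext, x)
    #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

    static const AVOption foobar_options[] = {
        { "low",  "set the low threshold",  OFFSET(low),  AV_OPT_TYPE_DOUBLE, {.dbl=0.1}, 0, 1, FLAGS },
        { "high", "set the high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=0.4}, 0, 1, FLAGS },
        { NULL }
    };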
Class
-----

AVFILTER_DEFINE_CLASS(foobar) will define a unique foobar_class with some kind
of signature referencing the options, etc., which will be referenced in the
definition of the AVFilter.

Filter definition
-----------------

At the end of the file, you will find foobar_inputs, foobar_outputs and
the AVFilter ff_vf_foobar. Don't forget to update AVFilter.description with
a description of what the filter does, starting with a capitalized letter and
ending with a period. You'd better drop the AVFilter.flags entry for now, and
re-add it later depending on the capabilities of your filter.
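
A rough sketch of those definitions, loosely modelled on existing simple video
filters (the callback names are the ones used in the rest of this document and
are assumptions for the hypothetical foobar, not a fixed requirement):

    static const AVFilterPad foobar_inputs[] = {
        {
            .name         = "default",
            .type         = AVMEDIA_TYPE_VIDEO,
            .config_props = config_input,
            .filter_frame = filter_frame,
        },
        { NULL }
    };

    static const AVFilterPad foobar_outputs[] = {
        {
            .name = "default",
            .type = AVMEDIA_TYPE_VIDEO,
        },
        { NULL }
    };

    AVFilter ff_vf_foobar = {
        .name          = "foobar",
        .description   = NULL_IF_CONFIG_SMALL("Apply a foobar effect."),
        .priv_size     = sizeof(FoobarContext),
        .init          = init,
        .uninit        = uninit,
        .query_formats = query_formats,
        .inputs        = foobar_inputs,
        .outputs       = foobar_outputs,
        .priv_class    = &foobar_class,
    };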
Callbacks
---------

Let's now study the common callbacks. Before going into details, note that all
these callbacks are explained in detail in libavfilter/avfilter.h, so when in
doubt, refer to the doxy in that file.

init()
~~~~~~

The first one to be called is init(). It's flagged as cold because it is not
called often. Look for "cold" on
http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html for more
information.

As the name suggests, init() is where you eventually initialize and allocate
your buffers, pre-compute your data, etc. Note that at this point, your local
context already has the user options initialized, but you still don't have any
clue about the kind of data input you will get, so this function is often
mainly used to sanitize the user options.

Some init()s will also define the number of inputs or outputs dynamically
according to the user options. A good example of this is the split filter, but
we won't cover this here since vf_foobar is just a simple 1:1 filter.
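
For our hypothetical foobar filter, a sanity-check-only init() might be as
small as this (the low/high options are the invented ones from above):

    static av_cold int init(AVFilterContext *ctx)
    {
        FoobarContext *foobar = ctx->priv;

        if (foobar->low > foobar->high) {
            av_log(ctx, AV_LOG_ERROR, "low must not be larger than high\n");
            return AVERROR(EINVAL);
        }
        return 0;
    }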
uninit()
~~~~~~~~

Similarly, there is the uninit() callback, doing what the name suggests. Free
everything you allocated here.

query_formats()
~~~~~~~~~~~~~~~

This follows init() and is used for the format negotiation. It is basically
where you say what pixel format(s) (gray, rgb 32, yuv 4:2:0, ...) you accept
for your inputs, and what you can output. All pixel formats are defined in
libavutil/pixfmt.h. If you don't change the pixel format between the input and
the output, you just have to define a pixel formats array and call
ff_set_common_formats(). For more complex negotiation, you can refer to other
filters such as vf_scale.
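
For a filter that only handles, say, 8-bit grayscale, the whole callback can be
a short sketch like this (the pixel format choice is just an example):

    static int query_formats(AVFilterContext *ctx)
    {
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
        };
        ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
        return 0;
    }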
config_props()
~~~~~~~~~~~~~~

This callback is not necessary, but you will probably have one or more
config_props() anyway. It's not a callback for the filter itself but for its
inputs or outputs (they're called "pads" - AVFilterPad - in libavfilter's
lexicon).

Inside the input config_props(), you are at a point where you know which pixel
format has been picked after query_formats(), and more information such as the
video width and height (inlink->{w,h}). So if you need to update your internal
context state depending on your input, you can do it here. In edgedetect you
can see that this callback is used to allocate buffers depending on this
information. They will be destroyed in uninit().

Inside the output config_props(), you can define what you want to change in the
output. Typically, if your filter is going to double the size of the video, you
will update outlink->w and outlink->h.
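
Continuing the hypothetical foobar example, an input config_props() that sizes
a scratch buffer from the negotiated input could look like this:

    static int config_input(AVFilterLink *inlink)
    {
        AVFilterContext *ctx = inlink->dst;
        FoobarContext *foobar = ctx->priv;

        // invented example: one byte of scratch space per pixel
        foobar->tmpbuf = av_malloc(inlink->w * inlink->h);
        if (!foobar->tmpbuf)
            return AVERROR(ENOMEM);
        return 0;
    }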
filter_frame()
~~~~~~~~~~~~~~

This is the callback you have been waiting for from the beginning: it is where
you process the received frames. Along with the frame, you get the input link
the frame comes from.

    static int filter_frame(AVFilterLink *inlink, AVFrame *in) { ... }

You can get the filter context through that input link:

    AVFilterContext *ctx = inlink->dst;

Then access your internal state context:

    FoobarContext *foobar = ctx->priv;

And also the output link where you will send your frame when you are done:

    AVFilterLink *outlink = ctx->outputs[0];

Here, we are picking the first output. You can have several, but in our case we
only have one since we are in a 1:1 input-output situation.

If you want to define a simple pass-through filter, you can just do:

    return ff_filter_frame(outlink, in);

But of course, you probably want to change the data of that frame.

This can be done by accessing frame->data[] and frame->linesize[].  Important
note here: the width does NOT match the linesize. The linesize is always
greater than or equal to the width. The padding created should not be changed
or even read. Typically, keep in mind that a previous filter in your chain
might have altered the frame dimensions but not the linesize. Imagine a crop
filter that halves the video size: the linesizes won't be changed, just the
width.

    <-------------- linesize ------------------------>
    +-------------------------------+----------------+ ^
    |                               |                | |
    |                               |                | |
    |           picture             |    padding     | | height
    |                               |                | |
    |                               |                | |
    +-------------------------------+----------------+ v
    <----------- width ------------->

Before modifying the "in" frame, you have to make sure it is writable, or get a
new one. Multiple scenarios are possible here depending on the kind of
processing you are doing.

Let's say you want to change one pixel depending on multiple pixels (typically
the surrounding ones) of the input. In that case, you can't do in-place
processing of the input, so you will need to allocate a new frame, with the
same properties as the input one, and send that new frame to the next filter:

    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    // out->data[...] = foobar(in->data[...])

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);

In-place processing
~~~~~~~~~~~~~~~~~~~

If you can just alter the input frame, you probably just want to do that
instead:

    av_frame_make_writable(in);
    // in->data[...] = foobar(in->data[...])
    return ff_filter_frame(outlink, in);

You may wonder why a frame might not be writable. The answer is that, for
example, a previous filter might still own the frame data: imagine a filter
prior to yours in the filtergraph that needs to cache the frame. You must not
alter that frame, otherwise it will make that previous filter buggy. This is
where av_frame_make_writable() helps (it won't have any effect if the frame
already is writable).

The problem with using av_frame_make_writable() is that in the worst case it
will copy the whole input frame before you change it all over again with your
filter: if the frame is not writable, av_frame_make_writable() will allocate
new buffers and copy the input frame data. You don't want that, and you can
avoid it by just allocating a new buffer if necessary, and processing from in
to out in your filter, saving the memcpy. Generally, this is done following
this scheme:

    int direct = 0;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    // out->data[...] = foobar(in->data[...])

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);

Of course, this will only work if you can do in-place processing. To test
whether your filter handles permissions correctly, you can use the perms
filter. For example with:

    -vf perms=random,foobar

Make sure no automatic pixel conversion is inserted between perms and foobar,
otherwise the frame permissions might change again and the test will be
meaningless: add av_log(0,0,"direct=%d\n",direct) in your code to check that.
You can avoid the issue with something like:

    -vf format=rgb24,perms=random,foobar

...assuming your filter accepts rgb24 of course. This will make sure the
necessary conversion is inserted before the perms filter.

Timeline
~~~~~~~~

Adding timeline support
(http://ffmpeg.org/ffmpeg-filters.html#Timeline-editing) is often an easy
feature to add. In the simplest case, you just have to add
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC to the AVFilter.flags. You can typically
do this when your filter does not need to save the previous context frames, or
basically if your filter just alters whatever goes in and doesn't need
previous/future information. See for instance commit 86cb986ce that adds
timeline support to the fieldorder filter.

In some cases, you might need to reset your context somehow. This is handled by
the AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL flag, which is used if the filter
must not process the frames but still wants to keep track of the frames going
through (to keep them in cache for when it's enabled again). See for example
commit 69d72140a that adds timeline support to the phase filter.

Threading
~~~~~~~~~

libavfilter does not yet support frame threading, but you can add slice
threading to your filters.

Let's say the foobar filter has the following frame processing function:

    dst = out->data[0];
    src = in ->data[0];

    for (y = 0; y < inlink->h; y++) {
        for (x = 0; x < inlink->w; x++)
            dst[x] = foobar(src[x]);
        dst += out->linesize[0];
        src += in ->linesize[0];
    }

The first thing is to make this function work on slices. The new code will
look like this:

    for (y = slice_start; y < slice_end; y++) {
        for (x = 0; x < inlink->w; x++)
            dst[x] = foobar(src[x]);
        dst += out->linesize[0];
        src += in ->linesize[0];
    }

The source and destination pointers, and slice_start/slice_end, will be defined
according to the number of jobs. Generally, it looks like this:

    const int slice_start = (in->height *  jobnr   ) / nb_jobs;
    const int slice_end   = (in->height * (jobnr+1)) / nb_jobs;
    uint8_t       *dst = out->data[0] + slice_start * out->linesize[0];
    const uint8_t *src =  in->data[0] + slice_start *  in->linesize[0];

This new code will be isolated in a new filter_slice():

    static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) { ... }

Note that we need our input and output frame to define slice_{start,end} and
dst/src, which are not available in that callback. They will be transmitted
through the opaque void *arg. You have to define a structure which contains
everything you need:

    typedef struct ThreadData {
        AVFrame *in, *out;
    } ThreadData;

If you need some more information from your local context, put it here.

In your filter_slice() function, you access it like this:

    const ThreadData *td = arg;
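
Putting the previous pieces together, the whole worker might look like this
(foobar() is still the hypothetical per-pixel function used throughout this
document):

    static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
    {
        const ThreadData *td = arg;
        const AVFrame *in  = td->in;
        AVFrame       *out = td->out;
        const int slice_start = (in->height *  jobnr   ) / nb_jobs;
        const int slice_end   = (in->height * (jobnr+1)) / nb_jobs;
        uint8_t       *dst = out->data[0] + slice_start * out->linesize[0];
        const uint8_t *src =  in->data[0] + slice_start *  in->linesize[0];
        int x, y;

        for (y = slice_start; y < slice_end; y++) {
            for (x = 0; x < in->width; x++)
                dst[x] = foobar(src[x]);
            dst += out->linesize[0];
            src += in->linesize[0];
        }
        return 0;
    }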
Then in your filter_frame() callback, you need to call the threading
distributor with something like this:

    ThreadData td;

    // ...

    td.in  = in;
    td.out = out;
    ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));

    // ...

    return ff_filter_frame(outlink, out);

The last step is to add the AVFILTER_FLAG_SLICE_THREADS flag to AVFilter.flags.

For more examples of slice threading additions, you can try to run git log -p
--grep 'slice threading' libavfilter/

Finalization
~~~~~~~~~~~~

When your awesome filter is finished, you have a few more steps before you're
done:

 - write its documentation in doc/filters.texi, and test the output with make
   doc/ffmpeg-filters.html.
 - add a FATE test, generally by adding an entry in
   tests/fate/filter-video.mak, then run make fate-filter-foobar GEN=1 to
   generate the data.
 - add an entry in the Changelog
 - edit libavfilter/version.h and increase LIBAVFILTER_VERSION_MINOR by one
   (and reset LIBAVFILTER_VERSION_MICRO to 100)
 - git add ... && git commit -m "avfilter: add foobar filter." && git format-patch -1

When all of this is done, you can submit your patch to the ffmpeg-devel
mailing-list for review.  If you need any help, feel free to come on our IRC
channel, #ffmpeg-devel on irc.freenode.net.
							
								
								
									
 47  ffmpeg.h
@@ -44,7 +44,6 @@
 | 
			
		||||
#include "libavutil/fifo.h"
 | 
			
		||||
#include "libavutil/pixfmt.h"
 | 
			
		||||
#include "libavutil/rational.h"
 | 
			
		||||
#include "libavutil/threadmessage.h"
 | 
			
		||||
 | 
			
		||||
#include "libswresample/swresample.h"
 | 
			
		||||
 | 
			
		||||
@@ -61,8 +60,6 @@ enum HWAccelID {
 | 
			
		||||
    HWACCEL_NONE = 0,
 | 
			
		||||
    HWACCEL_AUTO,
 | 
			
		||||
    HWACCEL_VDPAU,
 | 
			
		||||
    HWACCEL_DXVA2,
 | 
			
		||||
    HWACCEL_VDA,
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
typedef struct HWAccel {
 | 
			
		||||
@@ -204,8 +201,6 @@ typedef struct OptionsContext {
 | 
			
		||||
    int        nb_guess_layout_max;
 | 
			
		||||
    SpecifierOpt *apad;
 | 
			
		||||
    int        nb_apad;
 | 
			
		||||
    SpecifierOpt *discard;
 | 
			
		||||
    int        nb_discard;
 | 
			
		||||
} OptionsContext;
 | 
			
		||||
 | 
			
		||||
typedef struct InputFilter {
 | 
			
		||||
@@ -242,12 +237,7 @@ typedef struct InputStream {
 | 
			
		||||
    int file_index;
 | 
			
		||||
    AVStream *st;
 | 
			
		||||
    int discard;             /* true if stream data should be discarded */
 | 
			
		||||
    int user_set_discard;
 | 
			
		||||
    int decoding_needed;     /* non zero if the packets must be decoded in 'raw_fifo', see DECODING_FOR_* */
 | 
			
		||||
#define DECODING_FOR_OST    1
 | 
			
		||||
#define DECODING_FOR_FILTER 2
 | 
			
		||||
 | 
			
		||||
    AVCodecContext *dec_ctx;
 | 
			
		||||
    int decoding_needed;     /* true if the packets must be decoded in 'raw_fifo' */
 | 
			
		||||
    AVCodec *dec;
 | 
			
		||||
    AVFrame *decoded_frame;
 | 
			
		||||
    AVFrame *filter_frame; /* a ref of decoded_frame, to be sent to filters */
 | 
			
		||||
@@ -267,7 +257,7 @@ typedef struct InputStream {
 | 
			
		||||
    double ts_scale;
 | 
			
		||||
    int saw_first_ts;
 | 
			
		||||
    int showed_multi_packet_warning;
 | 
			
		||||
    AVDictionary *decoder_opts;
 | 
			
		||||
    AVDictionary *opts;
 | 
			
		||||
    AVRational framerate;               /* framerate forced with -r */
 | 
			
		||||
    int top_field_first;
 | 
			
		||||
    int guess_layout_max;
 | 
			
		||||
@@ -316,15 +306,6 @@ typedef struct InputStream {
 | 
			
		||||
    int  (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame);
 | 
			
		||||
    enum AVPixelFormat hwaccel_pix_fmt;
 | 
			
		||||
    enum AVPixelFormat hwaccel_retrieved_pix_fmt;
 | 
			
		||||
 | 
			
		||||
    /* stats */
 | 
			
		||||
    // combined size of all the packets read
 | 
			
		||||
    uint64_t data_size;
 | 
			
		||||
    /* number of packets successfully read for this stream */
 | 
			
		||||
    uint64_t nb_packets;
 | 
			
		||||
    // number of frames/samples retrieved from the decoder
 | 
			
		||||
    uint64_t frames_decoded;
 | 
			
		||||
    uint64_t samples_decoded;
 | 
			
		||||
} InputStream;
 | 
			
		||||
 | 
			
		||||
typedef struct InputFile {
 | 
			
		||||
@@ -344,10 +325,13 @@ typedef struct InputFile {
 | 
			
		||||
    int accurate_seek;
 | 
			
		||||
 | 
			
		||||
#if HAVE_PTHREADS
 | 
			
		||||
    AVThreadMessageQueue *in_thread_queue;
 | 
			
		||||
    pthread_t thread;           /* thread reading from this file */
 | 
			
		||||
    int non_blocking;           /* reading packets from the thread should not block */
 | 
			
		||||
    int finished;               /* the thread has exited */
 | 
			
		||||
    int joined;                 /* the thread has been joined */
 | 
			
		||||
    pthread_mutex_t fifo_lock;  /* lock for access to fifo */
 | 
			
		||||
    pthread_cond_t  fifo_cond;  /* the main thread will signal on this cond after reading from fifo */
 | 
			
		||||
    AVFifoBuffer *fifo;         /* demuxed packets are stored here; freed by the main thread */
 | 
			
		||||
#endif
 | 
			
		||||
} InputFile;
 | 
			
		||||
 | 
			
		||||
@@ -384,7 +368,6 @@ typedef struct OutputStream {
 | 
			
		||||
    /* dts of the last packet sent to the muxer */
 | 
			
		||||
    int64_t last_mux_dts;
 | 
			
		||||
    AVBitStreamFilterContext *bitstream_filters;
 | 
			
		||||
    AVCodecContext *enc_ctx;
 | 
			
		||||
    AVCodec *enc;
 | 
			
		||||
    int64_t max_frames;
 | 
			
		||||
    AVFrame *filtered_frame;
 | 
			
		||||
@@ -405,7 +388,7 @@ typedef struct OutputStream {
 | 
			
		||||
    double forced_keyframes_expr_const_values[FKF_NB];
 | 
			
		||||
 | 
			
		||||
    /* audio only */
 | 
			
		||||
    int *audio_channels_map;             /* list of the channels id to pick from the source stream */
 | 
			
		||||
    int audio_channels_map[SWR_CH_MAX];  /* list of the channels id to pick from the source stream */
 | 
			
		||||
    int audio_channels_mapped;           /* number of channels in audio_channels_map */
 | 
			
		||||
 | 
			
		||||
    char *logfile_prefix;
 | 
			
		||||
@@ -417,7 +400,7 @@ typedef struct OutputStream {
 | 
			
		||||
    char *filters_script;  ///< filtergraph script associated to the -filter_script option
 | 
			
		||||
 | 
			
		||||
    int64_t sws_flags;
 | 
			
		||||
    AVDictionary *encoder_opts;
 | 
			
		||||
    AVDictionary *opts;
 | 
			
		||||
    AVDictionary *swr_opts;
 | 
			
		||||
    AVDictionary *resample_opts;
 | 
			
		||||
    char *apad;
 | 
			
		||||
@@ -431,15 +414,6 @@ typedef struct OutputStream {
 | 
			
		||||
    int keep_pix_fmt;
 | 
			
		||||
 | 
			
		||||
    AVCodecParserContext *parser;
 | 
			
		||||
 | 
			
		||||
    /* stats */
 | 
			
		||||
    // combined size of all the packets written
 | 
			
		||||
    uint64_t data_size;
 | 
			
		||||
    // number of packets send to the muxer
 | 
			
		||||
    uint64_t packets_written;
 | 
			
		||||
    // number of frames/samples sent to the encoder
 | 
			
		||||
    uint64_t frames_encoded;
 | 
			
		||||
    uint64_t samples_encoded;
 | 
			
		||||
} OutputStream;
 | 
			
		||||
 | 
			
		||||
typedef struct OutputFile {
 | 
			
		||||
@@ -505,12 +479,11 @@ void show_usage(void);
 | 
			
		||||
 | 
			
		||||
void opt_output_file(void *optctx, const char *filename);
 | 
			
		||||
 | 
			
		||||
void remove_avoptions(AVDictionary **a, AVDictionary *b);
 | 
			
		||||
void assert_avoptions(AVDictionary *m);
 | 
			
		||||
 | 
			
		||||
int guess_input_channel_layout(InputStream *ist);
 | 
			
		||||
 | 
			
		||||
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *avctx, AVCodec *codec, enum AVPixelFormat target);
 | 
			
		||||
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target);
 | 
			
		||||
void choose_sample_fmt(AVStream *st, AVCodec *codec);
 | 
			
		||||
 | 
			
		||||
int configure_filtergraph(FilterGraph *fg);
 | 
			
		||||
@@ -521,7 +494,5 @@ FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
 | 
			
		||||
int ffmpeg_parse_options(int argc, char **argv);
 | 
			
		||||
 | 
			
		||||
int vdpau_init(AVCodecContext *s);
 | 
			
		||||
int dxva2_init(AVCodecContext *s);
 | 
			
		||||
int vda_init(AVCodecContext *s);
 | 
			
		||||
 | 
			
		||||
#endif /* FFMPEG_H */
 | 
			
		||||
 
620  ffmpeg_dxva2.c
@@ -1,620 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
 * This file is part of FFmpeg.
 | 
			
		||||
 *
 | 
			
		||||
 * FFmpeg is free software; you can redistribute it and/or
 | 
			
		||||
 * modify it under the terms of the GNU Lesser General Public
 | 
			
		||||
 * License as published by the Free Software Foundation; either
 | 
			
		||||
 * version 2.1 of the License, or (at your option) any later version.
 | 
			
		||||
 *
 | 
			
		||||
 * FFmpeg is distributed in the hope that it will be useful,
 | 
			
		||||
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 | 
			
		||||
 * Lesser General Public License for more details.
 | 
			
		||||
 *
 | 
			
		||||
 * You should have received a copy of the GNU Lesser General Public
 | 
			
		||||
 * License along with FFmpeg; if not, write to the Free Software
 | 
			
		||||
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
#include <windows.h>
 | 
			
		||||
 | 
			
		||||
#ifdef _WIN32_WINNT
 | 
			
		||||
#undef _WIN32_WINNT
 | 
			
		||||
#endif
 | 
			
		||||
#define _WIN32_WINNT 0x0600
 | 
			
		||||
#define DXVA2API_USE_BITFIELDS
 | 
			
		||||
#define COBJMACROS
 | 
			
		||||
 | 
			
		||||
#include <stdint.h>
 | 
			
		||||
 | 
			
		||||
#include <d3d9.h>
 | 
			
		||||
#include <dxva2api.h>
 | 
			
		||||
 | 
			
		||||
#include "ffmpeg.h"
 | 
			
		||||
 | 
			
		||||
#include "libavcodec/dxva2.h"
 | 
			
		||||
 | 
			
		||||
#include "libavutil/avassert.h"
 | 
			
		||||
#include "libavutil/buffer.h"
 | 
			
		||||
#include "libavutil/frame.h"
 | 
			
		||||
#include "libavutil/imgutils.h"
 | 
			
		||||
#include "libavutil/pixfmt.h"
 | 
			
		||||
 | 
			
		||||
/* define all the GUIDs used directly here,
 | 
			
		||||
   to avoid problems with inconsistent dxva2api.h versions in mingw-w64 and different MSVC version */
 | 
			
		||||
#include <initguid.h>
 | 
			
		||||
DEFINE_GUID(IID_IDirectXVideoDecoderService, 0xfc51a551,0xd5e7,0x11d9,0xaf,0x55,0x00,0x05,0x4e,0x43,0xff,0x02);
 | 
			
		||||
 | 
			
		||||
DEFINE_GUID(DXVA2_ModeMPEG2_VLD,      0xee27417f, 0x5e28,0x4e65,0xbe,0xea,0x1d,0x26,0xb5,0x08,0xad,0xc9);
 | 
			
		||||
DEFINE_GUID(DXVA2_ModeMPEG2and1_VLD,  0x86695f12, 0x340e,0x4f04,0x9f,0xd3,0x92,0x53,0xdd,0x32,0x74,0x60);
 | 
			
		||||
DEFINE_GUID(DXVA2_ModeH264_E,         0x1b81be68, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
 | 
			
		||||
DEFINE_GUID(DXVA2_ModeH264_F,         0x1b81be69, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
 | 
			
		||||
DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951,0x4C54,0x88,0xFE,0xAB,0xD2,0x5C,0x15,0xB3,0xD6);
 | 
			
		||||
DEFINE_GUID(DXVA2_ModeVC1_D,          0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
 | 
			
		||||
DEFINE_GUID(DXVA2_ModeVC1_D2010,      0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
 | 
			
		||||
DEFINE_GUID(DXVA2_NoEncrypt,          0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
 | 
			
		||||
DEFINE_GUID(GUID_NULL,                0x00000000, 0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);
 | 
			
		||||
 | 
			
		||||
typedef IDirect3D9* WINAPI pDirect3DCreate9(UINT);
 | 
			
		||||
typedef HRESULT WINAPI pCreateDeviceManager9(UINT *, IDirect3DDeviceManager9 **);
 | 
			
		||||
 | 
			
		||||
typedef struct dxva2_mode {
 | 
			
		||||
  const GUID     *guid;
 | 
			
		||||
  enum AVCodecID codec;
 | 
			
		||||
} dxva2_mode;
 | 
			
		||||
 | 
			
		||||
static const dxva2_mode dxva2_modes[] = {
 | 
			
		||||
    /* MPEG-2 */
 | 
			
		||||
    { &DXVA2_ModeMPEG2_VLD,      AV_CODEC_ID_MPEG2VIDEO },
 | 
			
		||||
    { &DXVA2_ModeMPEG2and1_VLD,  AV_CODEC_ID_MPEG2VIDEO },
 | 
			
		||||
 | 
			
		||||
    /* H.264 */
 | 
			
		||||
    { &DXVA2_ModeH264_F,         AV_CODEC_ID_H264 },
 | 
			
		||||
    { &DXVA2_ModeH264_E,         AV_CODEC_ID_H264 },
 | 
			
		||||
    /* Intel specific H.264 mode */
 | 
			
		||||
    { &DXVADDI_Intel_ModeH264_E, AV_CODEC_ID_H264 },
 | 
			
		||||
 | 
			
		||||
    /* VC-1 / WMV3 */
 | 
			
		||||
    { &DXVA2_ModeVC1_D2010,      AV_CODEC_ID_VC1  },
 | 
			
		||||
    { &DXVA2_ModeVC1_D2010,      AV_CODEC_ID_WMV3 },
 | 
			
		||||
    { &DXVA2_ModeVC1_D,          AV_CODEC_ID_VC1  },
 | 
			
		||||
    { &DXVA2_ModeVC1_D,          AV_CODEC_ID_WMV3 },
 | 
			
		||||
 | 
			
		||||
    { NULL,                      0 },
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
typedef struct surface_info {
 | 
			
		||||
    int used;
 | 
			
		||||
    uint64_t age;
 | 
			
		||||
} surface_info;
 | 
			
		||||
 | 
			
		||||
typedef struct DXVA2Context {
 | 
			
		||||
    HMODULE d3dlib;
 | 
			
		||||
    HMODULE dxva2lib;
 | 
			
		||||
 | 
			
		||||
    HANDLE  deviceHandle;
 | 
			
		||||
 | 
			
		||||
    IDirect3D9                  *d3d9;
 | 
			
		||||
    IDirect3DDevice9            *d3d9device;
 | 
			
		||||
    IDirect3DDeviceManager9     *d3d9devmgr;
 | 
			
		||||
    IDirectXVideoDecoderService *decoder_service;
 | 
			
		||||
    IDirectXVideoDecoder        *decoder;
 | 
			
		||||
 | 
			
		||||
    GUID                        decoder_guid;
 | 
			
		||||
    DXVA2_ConfigPictureDecode   decoder_config;
 | 
			
		||||
 | 
			
		||||
    LPDIRECT3DSURFACE9          *surfaces;
 | 
			
		||||
    surface_info                *surface_infos;
 | 
			
		||||
    uint32_t                    num_surfaces;
 | 
			
		||||
    uint64_t                    surface_age;
 | 
			
		||||
 | 
			
		||||
    AVFrame                     *tmp_frame;
 | 
			
		||||
} DXVA2Context;
 | 
			
		||||
 | 
			
		||||
typedef struct DXVA2SurfaceWrapper {
 | 
			
		||||
    DXVA2Context         *ctx;
 | 
			
		||||
    LPDIRECT3DSURFACE9   surface;
 | 
			
		||||
    IDirectXVideoDecoder *decoder;
 | 
			
		||||
} DXVA2SurfaceWrapper;
 | 
			
		||||
 | 
			
		||||
static void dxva2_destroy_decoder(AVCodecContext *s)
 | 
			
		||||
{
 | 
			
		||||
    InputStream  *ist = s->opaque;
 | 
			
		||||
    DXVA2Context *ctx = ist->hwaccel_ctx;
 | 
			
		||||
    int i;
 | 
			
		||||
 | 
			
		||||
    if (ctx->surfaces) {
 | 
			
		||||
        for (i = 0; i < ctx->num_surfaces; i++) {
 | 
			
		||||
            if (ctx->surfaces[i])
 | 
			
		||||
                IDirect3DSurface9_Release(ctx->surfaces[i]);
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
    av_freep(&ctx->surfaces);
 | 
			
		||||
    av_freep(&ctx->surface_infos);
 | 
			
		||||
    ctx->num_surfaces = 0;
 | 
			
		||||
    ctx->surface_age  = 0;
 | 
			
		||||
 | 
			
		||||
    if (ctx->decoder) {
 | 
			
		||||
        IDirectXVideoDecoder_Release(ctx->decoder);
 | 
			
		||||
        ctx->decoder = NULL;
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void dxva2_uninit(AVCodecContext *s)
 | 
			
		||||
{
 | 
			
		||||
    InputStream  *ist = s->opaque;
 | 
			
		||||
    DXVA2Context *ctx = ist->hwaccel_ctx;
 | 
			
		||||
 | 
			
		||||
    ist->hwaccel_uninit        = NULL;
 | 
			
		||||
    ist->hwaccel_get_buffer    = NULL;
 | 
			
		||||
    ist->hwaccel_retrieve_data = NULL;
 | 
			
		||||
 | 
			
		||||
    if (ctx->decoder)
 | 
			
		||||
        dxva2_destroy_decoder(s);
 | 
			
		||||
 | 
			
		||||
    if (ctx->decoder_service)
 | 
			
		||||
        IDirectXVideoDecoderService_Release(ctx->decoder_service);
 | 
			
		||||
 | 
			
		||||
    if (ctx->d3d9devmgr && ctx->deviceHandle != INVALID_HANDLE_VALUE)
 | 
			
		||||
        IDirect3DDeviceManager9_CloseDeviceHandle(ctx->d3d9devmgr, ctx->deviceHandle);
 | 
			
		||||
 | 
			
		||||
    if (ctx->d3d9devmgr)
 | 
			
		||||
        IDirect3DDeviceManager9_Release(ctx->d3d9devmgr);
 | 
			
		||||
 | 
			
		||||
    if (ctx->d3d9device)
 | 
			
		||||
        IDirect3DDevice9_Release(ctx->d3d9device);
 | 
			
		||||
 | 
			
		||||
    if (ctx->d3d9)
 | 
			
		||||
        IDirect3D9_Release(ctx->d3d9);
 | 
			
		||||
 | 
			
		||||
    if (ctx->d3dlib)
 | 
			
		||||
        FreeLibrary(ctx->d3dlib);
 | 
			
		||||
 | 
			
		||||
    if (ctx->dxva2lib)
 | 
			
		||||
        FreeLibrary(ctx->dxva2lib);
 | 
			
		||||
 | 
			
		||||
    av_frame_free(&ctx->tmp_frame);
 | 
			
		||||
 | 
			
		||||
    av_freep(&ist->hwaccel_ctx);
 | 
			
		||||
    av_freep(&s->hwaccel_context);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void dxva2_release_buffer(void *opaque, uint8_t *data)
 | 
			
		||||
{
 | 
			
		||||
    DXVA2SurfaceWrapper *w   = opaque;
 | 
			
		||||
    DXVA2Context        *ctx = w->ctx;
 | 
			
		||||
    int i;
 | 
			
		||||
 | 
			
		||||
    for (i = 0; i < ctx->num_surfaces; i++) {
 | 
			
		||||
        if (ctx->surfaces[i] == w->surface) {
 | 
			
		||||
            ctx->surface_infos[i].used = 0;
 | 
			
		||||
            break;
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
    IDirect3DSurface9_Release(w->surface);
 | 
			
		||||
    IDirectXVideoDecoder_Release(w->decoder);
 | 
			
		||||
    av_free(w);
 | 
			
		||||
}

static int dxva2_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream  *ist = s->opaque;
    DXVA2Context *ctx = ist->hwaccel_ctx;
    int i, old_unused = -1;
    LPDIRECT3DSURFACE9 surface;
    DXVA2SurfaceWrapper *w = NULL;

    av_assert0(frame->format == AV_PIX_FMT_DXVA2_VLD);

    for (i = 0; i < ctx->num_surfaces; i++) {
        surface_info *info = &ctx->surface_infos[i];
        if (!info->used && (old_unused == -1 || info->age < ctx->surface_infos[old_unused].age))
            old_unused = i;
    }
    if (old_unused == -1) {
        av_log(NULL, AV_LOG_ERROR, "No free DXVA2 surface!\n");
        return AVERROR(ENOMEM);
    }
    i = old_unused;

    surface = ctx->surfaces[i];

    w = av_mallocz(sizeof(*w));
    if (!w)
        return AVERROR(ENOMEM);

    frame->buf[0] = av_buffer_create((uint8_t*)surface, 0,
                                     dxva2_release_buffer, w,
                                     AV_BUFFER_FLAG_READONLY);
    if (!frame->buf[0]) {
        av_free(w);
        return AVERROR(ENOMEM);
    }

    w->ctx     = ctx;
    w->surface = surface;
    IDirect3DSurface9_AddRef(w->surface);
    w->decoder = ctx->decoder;
    IDirectXVideoDecoder_AddRef(w->decoder);

    ctx->surface_infos[i].used = 1;
    ctx->surface_infos[i].age  = ctx->surface_age++;

    frame->data[3] = (uint8_t *)surface;

    return 0;
}
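
/*
 * Annotation (not part of the original source): dxva2_get_buffer() picks the
 * least-recently-used free slot from the surface pool (the "age" bookkeeping
 * above in surface_infos) and hands the decoder the raw IDirect3DSurface9
 * pointer in frame->data[3]. The AVBufferRef wrapping it is created with size
 * 0 because the surface is an opaque GPU resource; dxva2_release_buffer()
 * marks the pool slot unused again once the last reference to the frame is
 * dropped.
 */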

static int dxva2_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    LPDIRECT3DSURFACE9 surface =  (LPDIRECT3DSURFACE9)frame->data[3];
    InputStream        *ist = s->opaque;
    DXVA2Context       *ctx = ist->hwaccel_ctx;
    D3DSURFACE_DESC    surfaceDesc;
    D3DLOCKED_RECT     LockedRect;
    HRESULT            hr;
    int                ret;

    IDirect3DSurface9_GetDesc(surface, &surfaceDesc);

    ctx->tmp_frame->width  = frame->width;
    ctx->tmp_frame->height = frame->height;
    ctx->tmp_frame->format = AV_PIX_FMT_NV12;

    ret = av_frame_get_buffer(ctx->tmp_frame, 32);
    if (ret < 0)
        return ret;

    hr = IDirect3DSurface9_LockRect(surface, &LockedRect, NULL, D3DLOCK_READONLY);
    if (FAILED(hr)) {
        av_log(NULL, AV_LOG_ERROR, "Unable to lock DXVA2 surface\n");
        return AVERROR_UNKNOWN;
    }

    av_image_copy_plane(ctx->tmp_frame->data[0], ctx->tmp_frame->linesize[0],
                        (uint8_t*)LockedRect.pBits,
                        LockedRect.Pitch, frame->width, frame->height);

    av_image_copy_plane(ctx->tmp_frame->data[1], ctx->tmp_frame->linesize[1],
                        (uint8_t*)LockedRect.pBits + LockedRect.Pitch * surfaceDesc.Height,
                        LockedRect.Pitch, frame->width, frame->height / 2);

    IDirect3DSurface9_UnlockRect(surface);

    ret = av_frame_copy_props(ctx->tmp_frame, frame);
    if (ret < 0)
        goto fail;

    av_frame_unref(frame);
    av_frame_move_ref(frame, ctx->tmp_frame);

    return 0;
fail:
    av_frame_unref(ctx->tmp_frame);
    return ret;
}
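
/*
 * Annotation (not part of the original source): the locked DXVA2 surface is
 * NV12, so LockedRect.pBits starts with surfaceDesc.Height rows of luma at
 * LockedRect.Pitch bytes each, immediately followed by the interleaved UV
 * plane at half that height. The copies above use frame->width/height rather
 * than the surface size because the surface may be padded to the alignment
 * chosen in dxva2_create_decoder(); tmp_frame is then moved into the caller's
 * frame, so the decoder output leaves this function as a software NV12 frame.
 */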

static int dxva2_alloc(AVCodecContext *s)
{
    InputStream  *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx;
    pDirect3DCreate9      *createD3D = NULL;
    pCreateDeviceManager9 *createDeviceManager = NULL;
    HRESULT hr;
    D3DPRESENT_PARAMETERS d3dpp = {0};
    D3DDISPLAYMODE        d3ddm;
    unsigned resetToken = 0;
    UINT adapter = D3DADAPTER_DEFAULT;

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return AVERROR(ENOMEM);

    ctx->deviceHandle = INVALID_HANDLE_VALUE;

    ist->hwaccel_ctx           = ctx;
    ist->hwaccel_uninit        = dxva2_uninit;
    ist->hwaccel_get_buffer    = dxva2_get_buffer;
    ist->hwaccel_retrieve_data = dxva2_retrieve_data;

    ctx->d3dlib = LoadLibrary("d3d9.dll");
    if (!ctx->d3dlib) {
        av_log(NULL, loglevel, "Failed to load D3D9 library\n");
        goto fail;
    }
    ctx->dxva2lib = LoadLibrary("dxva2.dll");
    if (!ctx->dxva2lib) {
        av_log(NULL, loglevel, "Failed to load DXVA2 library\n");
        goto fail;
    }

    createD3D = (pDirect3DCreate9 *)GetProcAddress(ctx->d3dlib, "Direct3DCreate9");
    if (!createD3D) {
        av_log(NULL, loglevel, "Failed to locate Direct3DCreate9\n");
        goto fail;
    }
    createDeviceManager = (pCreateDeviceManager9 *)GetProcAddress(ctx->dxva2lib, "DXVA2CreateDirect3DDeviceManager9");
    if (!createDeviceManager) {
        av_log(NULL, loglevel, "Failed to locate DXVA2CreateDirect3DDeviceManager9\n");
        goto fail;
    }

    ctx->d3d9 = createD3D(D3D_SDK_VERSION);
    if (!ctx->d3d9) {
        av_log(NULL, loglevel, "Failed to create IDirect3D object\n");
        goto fail;
    }

    if (ist->hwaccel_device) {
        adapter = atoi(ist->hwaccel_device);
        av_log(NULL, AV_LOG_INFO, "Using HWAccel device %d\n", adapter);
    }

    IDirect3D9_GetAdapterDisplayMode(ctx->d3d9, adapter, &d3ddm);
    d3dpp.Windowed         = TRUE;
    d3dpp.BackBufferWidth  = 640;
    d3dpp.BackBufferHeight = 480;
    d3dpp.BackBufferCount  = 0;
    d3dpp.BackBufferFormat = d3ddm.Format;
    d3dpp.SwapEffect       = D3DSWAPEFFECT_DISCARD;
    d3dpp.Flags            = D3DPRESENTFLAG_VIDEO;

    hr = IDirect3D9_CreateDevice(ctx->d3d9, adapter, D3DDEVTYPE_HAL, GetShellWindow(),
                                 D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED | D3DCREATE_FPU_PRESERVE,
                                 &d3dpp, &ctx->d3d9device);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to create Direct3D device\n");
        goto fail;
    }

    hr = createDeviceManager(&resetToken, &ctx->d3d9devmgr);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to create Direct3D device manager\n");
        goto fail;
    }

    hr = IDirect3DDeviceManager9_ResetDevice(ctx->d3d9devmgr, ctx->d3d9device, resetToken);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to bind Direct3D device to device manager\n");
        goto fail;
    }

    hr = IDirect3DDeviceManager9_OpenDeviceHandle(ctx->d3d9devmgr, &ctx->deviceHandle);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to open device handle\n");
        goto fail;
    }

    hr = IDirect3DDeviceManager9_GetVideoService(ctx->d3d9devmgr, ctx->deviceHandle, &IID_IDirectXVideoDecoderService, (void **)&ctx->decoder_service);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to create IDirectXVideoDecoderService\n");
        goto fail;
    }

    ctx->tmp_frame = av_frame_alloc();
    if (!ctx->tmp_frame)
        goto fail;

    s->hwaccel_context = av_mallocz(sizeof(struct dxva_context));
    if (!s->hwaccel_context)
        goto fail;

    return 0;
fail:
    dxva2_uninit(s);
    return AVERROR(EINVAL);
}
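
/*
 * Annotation (not part of the original source): dxva2_alloc() performs the
 * usual DXVA2 bootstrap: load d3d9.dll and dxva2.dll, create the IDirect3D9
 * object and a minimal windowed device (the 640x480 back buffer attached to
 * GetShellWindow() is only a placeholder, nothing is rendered), then bind
 * that device to an IDirect3DDeviceManager9 via ResetDevice() and obtain the
 * IDirectXVideoDecoderService through OpenDeviceHandle()/GetVideoService().
 * The pDirect3DCreate9 / pCreateDeviceManager9 types used with
 * GetProcAddress() are function-pointer typedefs declared earlier in this
 * file, roughly IDirect3D9 *WINAPI (UINT) and
 * HRESULT WINAPI (UINT *, IDirect3DDeviceManager9 **); the exact declarations
 * are not shown in this excerpt.
 */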

static int dxva2_get_decoder_configuration(AVCodecContext *s, const GUID *device_guid,
                                           const DXVA2_VideoDesc *desc,
                                           DXVA2_ConfigPictureDecode *config)
{
    InputStream  *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx = ist->hwaccel_ctx;
    unsigned cfg_count = 0, best_score = 0;
    DXVA2_ConfigPictureDecode *cfg_list = NULL;
    DXVA2_ConfigPictureDecode best_cfg = {{0}};
    HRESULT hr;
    int i;

    hr = IDirectXVideoDecoderService_GetDecoderConfigurations(ctx->decoder_service, device_guid, desc, NULL, &cfg_count, &cfg_list);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Unable to retrieve decoder configurations\n");
        return AVERROR(EINVAL);
    }

    for (i = 0; i < cfg_count; i++) {
        DXVA2_ConfigPictureDecode *cfg = &cfg_list[i];

        unsigned score;
        if (cfg->ConfigBitstreamRaw == 1)
            score = 1;
        else if (s->codec_id == AV_CODEC_ID_H264 && cfg->ConfigBitstreamRaw == 2)
            score = 2;
        else
            continue;
        if (IsEqualGUID(&cfg->guidConfigBitstreamEncryption, &DXVA2_NoEncrypt))
            score += 16;
        if (score > best_score) {
            best_score = score;
            best_cfg   = *cfg;
        }
    }
    CoTaskMemFree(cfg_list);

    if (!best_score) {
        av_log(NULL, loglevel, "No valid decoder configuration available\n");
        return AVERROR(EINVAL);
    }

    *config = best_cfg;
    return 0;
}

static int dxva2_create_decoder(AVCodecContext *s)
{
    InputStream  *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx = ist->hwaccel_ctx;
    struct dxva_context *dxva_ctx = s->hwaccel_context;
    GUID *guid_list = NULL;
    unsigned guid_count = 0, i, j;
    GUID device_guid = GUID_NULL;
    D3DFORMAT target_format = 0;
    DXVA2_VideoDesc desc = { 0 };
    DXVA2_ConfigPictureDecode config;
    HRESULT hr;
    int surface_alignment;
    int ret;

    hr = IDirectXVideoDecoderService_GetDecoderDeviceGuids(ctx->decoder_service, &guid_count, &guid_list);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to retrieve decoder device GUIDs\n");
        goto fail;
    }

    for (i = 0; dxva2_modes[i].guid; i++) {
        D3DFORMAT *target_list = NULL;
        unsigned target_count = 0;
        const dxva2_mode *mode = &dxva2_modes[i];
        if (mode->codec != s->codec_id)
            continue;

        for (j = 0; j < guid_count; j++) {
            if (IsEqualGUID(mode->guid, &guid_list[j]))
                break;
        }
        if (j == guid_count)
            continue;

        hr = IDirectXVideoDecoderService_GetDecoderRenderTargets(ctx->decoder_service, mode->guid, &target_count, &target_list);
        if (FAILED(hr)) {
            continue;
        }
        for (j = 0; j < target_count; j++) {
            const D3DFORMAT format = target_list[j];
            if (format == MKTAG('N','V','1','2')) {
                target_format = format;
                break;
            }
        }
        CoTaskMemFree(target_list);
        if (target_format) {
            device_guid = *mode->guid;
            break;
        }
    }
    CoTaskMemFree(guid_list);

    if (IsEqualGUID(&device_guid, &GUID_NULL)) {
        av_log(NULL, loglevel, "No decoder device for codec found\n");
        goto fail;
    }

    desc.SampleWidth  = s->coded_width;
    desc.SampleHeight = s->coded_height;
    desc.Format       = target_format;

    ret = dxva2_get_decoder_configuration(s, &device_guid, &desc, &config);
    if (ret < 0) {
        goto fail;
    }

    /* decoding MPEG-2 requires additional alignment on some Intel GPUs,
       but it causes issues for H.264 on certain AMD GPUs..... */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        surface_alignment = 32;
    else
        surface_alignment = 16;

    /* 4 base work surfaces */
    ctx->num_surfaces = 4;

    /* add surfaces based on number of possible refs */
    if (s->codec_id == AV_CODEC_ID_H264)
        ctx->num_surfaces += 16;
    else
        ctx->num_surfaces += 2;

    /* add extra surfaces for frame threading */
    if (s->active_thread_type & FF_THREAD_FRAME)
        ctx->num_surfaces += s->thread_count;

    ctx->surfaces      = av_mallocz(ctx->num_surfaces * sizeof(*ctx->surfaces));
    ctx->surface_infos = av_mallocz(ctx->num_surfaces * sizeof(*ctx->surface_infos));

    if (!ctx->surfaces || !ctx->surface_infos) {
        av_log(NULL, loglevel, "Unable to allocate surface arrays\n");
        goto fail;
    }

    hr = IDirectXVideoDecoderService_CreateSurface(ctx->decoder_service,
                                                   FFALIGN(s->coded_width, surface_alignment),
                                                   FFALIGN(s->coded_height, surface_alignment),
                                                   ctx->num_surfaces - 1,
                                                   target_format, D3DPOOL_DEFAULT, 0,
                                                   DXVA2_VideoDecoderRenderTarget,
                                                   ctx->surfaces, NULL);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to create %d video surfaces\n", ctx->num_surfaces);
        goto fail;
    }

    hr = IDirectXVideoDecoderService_CreateVideoDecoder(ctx->decoder_service, &device_guid,
                                                        &desc, &config, ctx->surfaces,
                                                        ctx->num_surfaces, &ctx->decoder);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to create DXVA2 video decoder\n");
        goto fail;
    }

    ctx->decoder_guid   = device_guid;
    ctx->decoder_config = config;

    dxva_ctx->cfg           = &ctx->decoder_config;
    dxva_ctx->decoder       = ctx->decoder;
    dxva_ctx->surface       = ctx->surfaces;
    dxva_ctx->surface_count = ctx->num_surfaces;

    if (IsEqualGUID(&ctx->decoder_guid, &DXVADDI_Intel_ModeH264_E))
        dxva_ctx->workaround |= FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO;

    return 0;
fail:
    dxva2_destroy_decoder(s);
    return AVERROR(EINVAL);
}
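
/*
 * Annotation (not part of the original source): dxva2_create_decoder() only
 * accepts a decoder GUID whose render-target list contains the NV12 fourcc
 * (MKTAG('N','V','1','2')), which is why dxva2_retrieve_data() can assume an
 * NV12 layout. The pool size above is 4 working surfaces, plus 16 for H.264
 * reference frames (2 otherwise), plus one per frame thread; CreateSurface()
 * takes a count of back buffers, hence the ctx->num_surfaces - 1 argument for
 * ctx->num_surfaces surfaces in total.
 */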

int dxva2_init(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx;
    int ret;

    if (!ist->hwaccel_ctx) {
        ret = dxva2_alloc(s);
        if (ret < 0)
            return ret;
    }
    ctx = ist->hwaccel_ctx;

    if (s->codec_id == AV_CODEC_ID_H264 &&
        (s->profile & ~FF_PROFILE_H264_CONSTRAINED) > FF_PROFILE_H264_HIGH) {
        av_log(NULL, loglevel, "Unsupported H.264 profile for DXVA2 HWAccel: %d\n", s->profile);
        return AVERROR(EINVAL);
    }

    if (ctx->decoder)
        dxva2_destroy_decoder(s);

    ret = dxva2_create_decoder(s);
    if (ret < 0) {
        av_log(NULL, loglevel, "Error creating the DXVA2 decoder\n");
        return ret;
    }

    return 0;
}
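
For orientation, dxva2_init() above is the entry point the rest of ffmpeg uses: it is registered in the hwaccels[] table of ffmpeg_opt.c, and the same entry appears again in the diff further down. A trimmed copy of that table entry:

#if HAVE_DXVA2_LIB
    { "dxva2", dxva2_init, HWACCEL_DXVA2, AV_PIX_FMT_DXVA2_VLD },
#endif

ffmpeg.c is then expected to call ist->hwaccel_get_buffer from its get_buffer callback and ist->hwaccel_retrieve_data once a decoded frame comes back still in AV_PIX_FMT_DXVA2_VLD; those call sites are outside this excerpt and are noted here only as an assumption.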

ffmpeg_filter.c (101 changed lines)
@@ -37,23 +37,21 @@
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"

enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target)
{
    if (codec && codec->pix_fmts) {
        const enum AVPixelFormat *p = codec->pix_fmts;
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
        int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
        enum AVPixelFormat best= AV_PIX_FMT_NONE;
        static const enum AVPixelFormat mjpeg_formats[] =
            { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
        static const enum AVPixelFormat ljpeg_formats[] =
            { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
              AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
        const enum AVPixelFormat mjpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
        const enum AVPixelFormat ljpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
                                                     AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };

        if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            if (enc_ctx->codec_id == AV_CODEC_ID_MJPEG) {
        if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
                p = mjpeg_formats;
            } else if (enc_ctx->codec_id == AV_CODEC_ID_LJPEG) {
            } else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
                p =ljpeg_formats;
            }
        }
@@ -99,21 +97,21 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)

static char *choose_pix_fmts(OutputStream *ost)
{
    AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
    AVDictionaryEntry *strict_dict = av_dict_get(ost->opts, "strict", NULL, 0);
    if (strict_dict)
        // used by choose_pixel_fmt() and below
        av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
        av_opt_set(ost->st->codec, "strict", strict_dict->value, 0);

     if (ost->keep_pix_fmt) {
        if (ost->filter)
            avfilter_graph_set_auto_convert(ost->filter->graph->graph,
                                            AVFILTER_AUTO_CONVERT_NONE);
        if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
        if (ost->st->codec->pix_fmt == AV_PIX_FMT_NONE)
            return NULL;
        return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
        return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
    }
    if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
        return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
    if (ost->st->codec->pix_fmt != AV_PIX_FMT_NONE) {
        return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt)));
    } else if (ost->enc && ost->enc->pix_fmts) {
        const enum AVPixelFormat *p;
        AVIOContext *s = NULL;
@@ -124,10 +122,10 @@ static char *choose_pix_fmts(OutputStream *ost)
            exit_program(1);

        p = ost->enc->pix_fmts;
        if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            if (ost->enc_ctx->codec_id == AV_CODEC_ID_MJPEG) {
        if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            if (ost->st->codec->codec_id == AV_CODEC_ID_MJPEG) {
                p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
            } else if (ost->enc_ctx->codec_id == AV_CODEC_ID_LJPEG) {
            } else if (ost->st->codec->codec_id == AV_CODEC_ID_LJPEG) {
                p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
                                                    AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
            }
@@ -149,8 +147,8 @@ static char *choose_pix_fmts(OutputStream *ost)
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name)           \
static char *choose_ ## var ## s(OutputStream *ost)                            \
{                                                                              \
    if (ost->enc_ctx->var != none) {                                           \
        get_name(ost->enc_ctx->var);                                           \
    if (ost->st->codec->var != none) {                                         \
        get_name(ost->st->codec->var);                                         \
        return av_strdup(name);                                                \
    } else if (ost->enc && ost->enc->supported_list) {                         \
        const type *p;                                                         \
@@ -262,7 +260,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
        /* find the first unused stream of corresponding type */
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
            if (ist->dec_ctx->codec_type == type && ist->discard)
            if (ist->st->codec->codec_type == type && ist->discard)
                break;
        }
        if (i == nb_input_streams) {
@@ -275,7 +273,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
    av_assert0(ist);

    ist->discard         = 0;
    ist->decoding_needed |= DECODING_FOR_FILTER;
    ist->decoding_needed++;
    ist->st->discard = AVDISCARD_NONE;

    GROW_ARRAY(fg->inputs, fg->nb_inputs);
@@ -344,7 +342,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
    char *pix_fmts;
    OutputStream *ost = ofilter->ost;
    OutputFile    *of = output_files[ost->file_index];
    AVCodecContext *codec = ost->enc_ctx;
    AVCodecContext *codec = ost->st->codec;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    int ret;
@@ -434,7 +432,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
{
    OutputStream *ost = ofilter->ost;
    OutputFile    *of = output_files[ost->file_index];
    AVCodecContext *codec  = ost->enc_ctx;
    AVCodecContext *codec  = ost->st->codec;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    char *sample_fmts, *sample_rates, *channel_layouts;
@@ -595,8 +593,8 @@ static int sub2video_prepare(InputStream *ist)
    /* Compute the size of the canvas for the subtitles stream.
       If the subtitles codec has set a size, use it. Otherwise use the
       maximum dimensions of the video streams in the same file. */
    w = ist->dec_ctx->width;
    h = ist->dec_ctx->height;
    w = ist->st->codec->width;
    h = ist->st->codec->height;
    if (!(w && h)) {
        for (i = 0; i < avf->nb_streams; i++) {
            if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
@@ -610,12 +608,12 @@ static int sub2video_prepare(InputStream *ist)
        }
        av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
    }
    ist->sub2video.w = ist->dec_ctx->width  = ist->resample_width  = w;
    ist->sub2video.h = ist->dec_ctx->height = ist->resample_height = h;
    ist->sub2video.w = ist->st->codec->width  = ist->resample_width  = w;
    ist->sub2video.h = ist->st->codec->height = ist->resample_height = h;

    /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
       palettes for all rectangles are identical or compatible */
    ist->resample_pix_fmt = ist->dec_ctx->pix_fmt = AV_PIX_FMT_RGB32;
    ist->resample_pix_fmt = ist->st->codec->pix_fmt = AV_PIX_FMT_RGB32;

    ist->sub2video.frame = av_frame_alloc();
    if (!ist->sub2video.frame)
@@ -638,7 +636,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
    char name[255];
    int ret, pad_idx = 0;

    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
    if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
        return AVERROR(EINVAL);
    }
@@ -646,7 +644,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
    if (!fr.num)
        fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);

    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
    if (ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        ret = sub2video_prepare(ist);
        if (ret < 0)
            return ret;
@@ -654,7 +652,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,

    sar = ist->st->sample_aspect_ratio.num ?
          ist->st->sample_aspect_ratio :
          ist->dec_ctx->sample_aspect_ratio;
          ist->st->codec->sample_aspect_ratio;
    if(!sar.den)
        sar = (AVRational){0,1};
    av_bprint_init(&args, 0, 1);
@@ -664,7 +662,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
             ist->resample_height,
             ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
             tb.num, tb.den, sar.num, sar.den,
             SWS_BILINEAR + ((ist->dec_ctx->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
             SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
    if (fr.num && fr.den)
        av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
@@ -732,21 +730,21 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
    char name[255];
    int ret, pad_idx = 0;

    if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
    if (ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
        return AVERROR(EINVAL);
    }

    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
             1, ist->dec_ctx->sample_rate,
             ist->dec_ctx->sample_rate,
             av_get_sample_fmt_name(ist->dec_ctx->sample_fmt));
    if (ist->dec_ctx->channel_layout)
             1, ist->st->codec->sample_rate,
             ist->st->codec->sample_rate,
             av_get_sample_fmt_name(ist->st->codec->sample_fmt));
    if (ist->st->codec->channel_layout)
        av_bprintf(&args, ":channel_layout=0x%"PRIx64,
                   ist->dec_ctx->channel_layout);
                   ist->st->codec->channel_layout);
    else
        av_bprintf(&args, ":channels=%d", ist->dec_ctx->channels);
        av_bprintf(&args, ":channels=%d", ist->st->codec->channels);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

@@ -830,12 +828,6 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
    av_freep(&ifilter->name);
    DESCRIBE_FILTER_LINK(ifilter, in, 1);

    if (!ifilter->ist->dec) {
        av_log(NULL, AV_LOG_ERROR,
               "No decoder for stream #%d:%d, filtering impossible\n",
               ifilter->ist->file_index, ifilter->ist->st->index);
        return AVERROR_DECODER_NOT_FOUND;
    }
    switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
    case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
@@ -880,7 +872,7 @@ int configure_filtergraph(FilterGraph *fg)
            args[strlen(args) - 1] = '\0';
        fg->graph->resample_lavr_opts = av_strdup(args);

        e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
        e = av_dict_get(ost->opts, "threads", NULL, 0);
        if (e)
            av_opt_set(fg->graph, "threads", e->value, 0);
    }
@@ -898,11 +890,8 @@ int configure_filtergraph(FilterGraph *fg)
        init_input_filter(fg, cur);

    for (cur = inputs, i = 0; cur; cur = cur->next, i++)
        if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
            avfilter_inout_free(&inputs);
            avfilter_inout_free(&outputs);
        if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
            return ret;
        }
    avfilter_inout_free(&inputs);

    if (!init || simple) {
@@ -928,16 +917,6 @@ int configure_filtergraph(FilterGraph *fg)
    }

    fg->reconfiguration = 1;

    for (i = 0; i < fg->nb_outputs; i++) {
        OutputStream *ost = fg->outputs[i]->ost;
        if (ost &&
            ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
    }

    return 0;
}

ffmpeg_opt.c (250 changed lines)
@@ -66,12 +66,6 @@
const HWAccel hwaccels[] = {
#if HAVE_VDPAU_X11
    { "vdpau", vdpau_init, HWACCEL_VDPAU, AV_PIX_FMT_VDPAU },
#endif
#if HAVE_DXVA2_LIB
    { "dxva2", dxva2_init, HWACCEL_DXVA2, AV_PIX_FMT_DXVA2_VLD },
#endif
#if CONFIG_VDA
    { "vda",   vda_init,   HWACCEL_VDA,   AV_PIX_FMT_VDA },
#endif
    { 0 },
};
@@ -104,6 +98,8 @@ float max_error_rate  = 2.0/3;
static int intra_only         = 0;
static int file_overwrite     = 0;
static int no_file_overwrite  = 0;
static int video_discard      = 0;
static int intra_dc_precision = 8;
static int do_psnr            = 0;
static int input_sync;
static int override_ffserver  = 0;
@@ -562,7 +558,7 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
 * list of input streams. */
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
    int i, ret;
    int i;

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
@@ -571,8 +567,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
        char *framerate = NULL, *hwaccel = NULL, *hwaccel_device = NULL;
        char *codec_tag = NULL;
        char *next;
        char *discard_str = NULL;
        const AVOption *discard_opt = av_opt_find(dec, "skip_frame", NULL, 0, 0);

        if (!ist)
            exit_program(1);
@@ -597,33 +591,13 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
        }

        ist->dec = choose_decoder(o, ic, st);
        ist->decoder_opts = filter_codec_opts(o->g->codec_opts, ist->st->codec->codec_id, ic, st, ist->dec);
        ist->opts = filter_codec_opts(o->g->codec_opts, ist->st->codec->codec_id, ic, st, ist->dec);

        ist->reinit_filters = -1;
        MATCH_PER_STREAM_OPT(reinit_filters, i, ist->reinit_filters, ic, st);

        MATCH_PER_STREAM_OPT(discard, str, discard_str, ic, st);
        ist->user_set_discard = AVDISCARD_NONE;
        if (discard_str && av_opt_eval_int(dec, discard_opt, discard_str, &ist->user_set_discard) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error parsing discard %s.\n",
                    discard_str);
            exit_program(1);
        }

        ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;

        ist->dec_ctx = avcodec_alloc_context3(ist->dec);
        if (!ist->dec_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Error allocating the decoder context.\n");
            exit_program(1);
        }

        ret = avcodec_copy_context(ist->dec_ctx, dec);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
            exit_program(1);
        }

        switch (dec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if(!ist->dec)
@@ -632,9 +606,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
                dec->flags |= CODEC_FLAG_EMU_EDGE;
            }

            ist->resample_height  = ist->dec_ctx->height;
            ist->resample_width   = ist->dec_ctx->width;
            ist->resample_pix_fmt = ist->dec_ctx->pix_fmt;
            ist->resample_height  = dec->height;
            ist->resample_width   = dec->width;
            ist->resample_pix_fmt = dec->pix_fmt;

            MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st);
            if (framerate && av_parse_video_rate(&ist->framerate,
@@ -688,10 +662,10 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
            MATCH_PER_STREAM_OPT(guess_layout_max, i, ist->guess_layout_max, ic, st);
            guess_input_channel_layout(ist);

            ist->resample_sample_fmt     = ist->dec_ctx->sample_fmt;
            ist->resample_sample_rate    = ist->dec_ctx->sample_rate;
            ist->resample_channels       = ist->dec_ctx->channels;
            ist->resample_channel_layout = ist->dec_ctx->channel_layout;
            ist->resample_sample_fmt     = dec->sample_fmt;
            ist->resample_sample_rate    = dec->sample_rate;
            ist->resample_channels       = dec->channels;
            ist->resample_channel_layout = dec->channel_layout;

            break;
        case AVMEDIA_TYPE_DATA:
@@ -702,7 +676,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
            MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st);
            MATCH_PER_STREAM_OPT(canvas_sizes, str, canvas_size, ic, st);
            if (canvas_size &&
                av_parse_video_size(&ist->dec_ctx->width, &ist->dec_ctx->height, canvas_size) < 0) {
                av_parse_video_size(&dec->width, &dec->height, canvas_size) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Invalid canvas size: %s.\n", canvas_size);
                exit_program(1);
            }
@@ -785,6 +759,7 @@ static int open_input_file(OptionsContext *o, const char *filename)
    AVInputFormat *file_iformat = NULL;
    int err, i, ret;
    int64_t timestamp;
    uint8_t buf[128];
    AVDictionary **opts;
    AVDictionary *unused_opts = NULL;
    AVDictionaryEntry *e = NULL;
@@ -813,7 +788,8 @@ static int open_input_file(OptionsContext *o, const char *filename)
        exit_program(1);
    }
    if (o->nb_audio_sample_rate) {
        av_dict_set_int(&o->g->format_opts, "sample_rate", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i, 0);
        snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
        av_dict_set(&o->g->format_opts, "sample_rate", buf, 0);
    }
    if (o->nb_audio_channels) {
        /* because we set audio_channels based on both the "ac" and
@@ -822,7 +798,9 @@ static int open_input_file(OptionsContext *o, const char *filename)
        if (file_iformat && file_iformat->priv_class &&
            av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
                        AV_OPT_SEARCH_FAKE_OBJ)) {
            av_dict_set_int(&o->g->format_opts, "channels", o->audio_channels[o->nb_audio_channels - 1].u.i, 0);
            snprintf(buf, sizeof(buf), "%d",
                     o->audio_channels[o->nb_audio_channels - 1].u.i);
            av_dict_set(&o->g->format_opts, "channels", buf, 0);
        }
    }
    if (o->nb_frame_rates) {
@@ -868,7 +846,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
        print_error(filename, err);
        exit_program(1);
    }
    remove_avoptions(&o->g->format_opts, o->g->codec_opts);
    assert_avoptions(o->g->format_opts);

    /* apply forced codec ids */
@@ -884,10 +861,8 @@ static int open_input_file(OptionsContext *o, const char *filename)
    ret = avformat_find_stream_info(ic, opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
        if (ic->nb_streams == 0) {
            avformat_close_input(&ic);
            exit_program(1);
        }
        avformat_close_input(&ic);
        exit_program(1);
    }

    timestamp = (o->start_time == AV_NOPTS_VALUE) ? 0 : o->start_time;
@@ -930,7 +905,7 @@ static int open_input_file(OptionsContext *o, const char *filename)
    unused_opts = strip_specifiers(o->g->codec_opts);
    for (i = f->ist_index; i < nb_input_streams; i++) {
        e = NULL;
        while ((e = av_dict_get(input_streams[i]->decoder_opts, "", e,
        while ((e = av_dict_get(input_streams[i]->opts, "", e,
                                AV_DICT_IGNORE_SUFFIX)))
            av_dict_set(&unused_opts, e->key, NULL, 0);
    }
@@ -1072,19 +1047,11 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
    ost->st         = st;
    st->codec->codec_type = type;
    choose_encoder(o, oc, ost);

    ost->enc_ctx = avcodec_alloc_context3(ost->enc);
    if (!ost->enc_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Error allocating the encoding context.\n");
        exit_program(1);
    }
    ost->enc_ctx->codec_type = type;

    if (ost->enc) {
        AVIOContext *s = NULL;
        char *buf = NULL, *arg = NULL, *preset = NULL;

        ost->encoder_opts  = filter_codec_opts(o->g->codec_opts, ost->enc->id, oc, st, ost->enc);
        ost->opts  = filter_codec_opts(o->g->codec_opts, ost->enc->id, oc, st, ost->enc);

        MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
        if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
@@ -1099,7 +1066,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
                    exit_program(1);
                }
                *arg++ = 0;
                av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
                av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
                av_free(buf);
            } while (!s->eof_reached);
            avio_close(s);
@@ -1111,9 +1078,12 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
            exit_program(1);
        }
    } else {
        ost->encoder_opts = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st, NULL);
        ost->opts = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st, NULL);
    }

    avcodec_get_context_defaults3(st->codec, ost->enc);
    st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy

    ost->max_frames = INT64_MAX;
    MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
    for (i = 0; i<o->nb_max_frames; i++) {
@@ -1149,17 +1119,17 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
        uint32_t tag = strtol(codec_tag, &next, 0);
        if (*next)
            tag = AV_RL32(codec_tag);
        ost->enc_ctx->codec_tag = tag;
        st->codec->codec_tag = tag;
    }

    MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
    if (qscale >= 0) {
        ost->enc_ctx->flags |= CODEC_FLAG_QSCALE;
        ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
        st->codec->flags |= CODEC_FLAG_QSCALE;
        st->codec->global_quality = FF_QP2LAMBDA * qscale;
    }

    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        ost->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
        st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    av_opt_get_int(o->g->sws_opts, "sws_flags", 0, &ost->sws_flags);

@@ -1173,7 +1143,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
    if (source_index >= 0) {
        ost->sync_ist = input_streams[source_index];
        input_streams[source_index]->discard = 0;
        input_streams[source_index]->st->discard = input_streams[source_index]->user_set_discard;
        input_streams[source_index]->st->discard = AVDISCARD_NONE;
    }
    ost->last_mux_dts = AV_NOPTS_VALUE;

@@ -1269,7 +1239,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in

    ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO, source_index);
    st  = ost->st;
    video_enc = ost->enc_ctx;
    video_enc = st->codec;

    MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
    if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
@@ -1374,6 +1344,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
            if (p) p++;
        }
        video_enc->rc_override_count = i;
        video_enc->intra_dc_precision = intra_dc_precision - 8;

        if (do_psnr)
            video_enc->flags|= CODEC_FLAG_PSNR;
@@ -1383,11 +1354,11 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
        if (do_pass) {
            if (do_pass & 1) {
                video_enc->flags |= CODEC_FLAG_PASS1;
                av_dict_set(&ost->encoder_opts, "flags", "+pass1", AV_DICT_APPEND);
                av_dict_set(&ost->opts, "flags", "+pass1", AV_DICT_APPEND);
            }
            if (do_pass & 2) {
                video_enc->flags |= CODEC_FLAG_PASS2;
                av_dict_set(&ost->encoder_opts, "flags", "+pass2", AV_DICT_APPEND);
                av_dict_set(&ost->opts, "flags", "+pass2", AV_DICT_APPEND);
            }
        }

@@ -1429,7 +1400,7 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
    ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO, source_index);
    st  = ost->st;

    audio_enc = ost->enc_ctx;
    audio_enc = st->codec;
    audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;

    MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
@@ -1459,29 +1430,15 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
        /* check for channel mapping for this audio stream */
        for (n = 0; n < o->nb_audio_channel_maps; n++) {
            AudioChannelMap *map = &o->audio_channel_maps[n];
            if ((map->ofile_idx   == -1 || ost->file_index == map->ofile_idx) &&
            InputStream *ist = input_streams[ost->source_index];
            if ((map->channel_idx == -1 || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) &&
                (map->ofile_idx   == -1 || ost->file_index == map->ofile_idx) &&
                (map->ostream_idx == -1 || ost->st->index  == map->ostream_idx)) {
                InputStream *ist;

                if (map->channel_idx == -1) {
                    ist = NULL;
                } else if (ost->source_index < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Cannot determine input stream for channel mapping %d.%d\n",
                           ost->file_index, ost->st->index);
                    continue;
                } else {
                    ist = input_streams[ost->source_index];
                }

                if (!ist || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) {
                    if (av_reallocp_array(&ost->audio_channels_map,
                                          ost->audio_channels_mapped + 1,
                                          sizeof(*ost->audio_channels_map)
                                          ) < 0 )
                        exit_program(1);

                if (ost->audio_channels_mapped < FF_ARRAY_ELEMS(ost->audio_channels_map))
                    ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
                }
                else
                    av_log(NULL, AV_LOG_FATAL, "Max channel mapping for output %d.%d reached\n",
                           ost->file_index, ost->st->index);
            }
        }
    }
@@ -1521,7 +1478,7 @@ static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc,

    ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE, source_index);
    st  = ost->st;
    subtitle_enc = ost->enc_ctx;
    subtitle_enc = st->codec;

    subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;

@@ -1639,8 +1596,7 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
            choose_sample_fmt(st, codec);
        else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
            choose_pixel_fmt(st, st->codec, codec, st->codec->pix_fmt);
        avcodec_copy_context(ost->enc_ctx, st->codec);
            choose_pixel_fmt(st, codec, st->codec->pix_fmt);
    }

    avformat_close_input(&ic);
@@ -1813,7 +1769,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
                    if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup("anull");
                    if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup("null");
                    ist->discard = 0;
                    ist->st->discard = ist->user_set_discard;
                    ist->st->discard = AVDISCARD_NONE;
                    break;
                }
            }
@@ -1827,7 +1783,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
        /* pick the "best" stream of each type */

        /* video: highest resolution */
        if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
        if (!o->video_disable && oc->oformat->video_codec != AV_CODEC_ID_NONE) {
            int area = 0, idx = -1;
            int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
            for (i = 0; i < nb_input_streams; i++) {
@@ -1849,7 +1805,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
        }

        /* audio: most channels */
        if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
        if (!o->audio_disable && oc->oformat->audio_codec != AV_CODEC_ID_NONE) {
            int channels = 0, idx = -1;
            for (i = 0; i < nb_input_streams; i++) {
                ist = input_streams[i];
@@ -1868,27 +1824,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
        if (!o->subtitle_disable && (avcodec_find_encoder(oc->oformat->subtitle_codec) || subtitle_codec_name)) {
            for (i = 0; i < nb_input_streams; i++)
                if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                    AVCodecDescriptor const *input_descriptor =
                        avcodec_descriptor_get(input_streams[i]->st->codec->codec_id);
                    AVCodecDescriptor const *output_descriptor = NULL;
                    AVCodec const *output_codec =
                        avcodec_find_encoder(oc->oformat->subtitle_codec);
                    int input_props = 0, output_props = 0;
                    if (output_codec)
                        output_descriptor = avcodec_descriptor_get(output_codec->id);
                    if (input_descriptor)
                        input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
                    if (output_descriptor)
                        output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
                    if (subtitle_codec_name ||
                        input_props & output_props ||
                        // Map dvb teletext which has neither property to any output subtitle encoder
                        input_descriptor && output_descriptor &&
 | 
			
		||||
                        (!input_descriptor->props ||
 | 
			
		||||
                         !output_descriptor->props)) {
 | 
			
		||||
                        new_subtitle_stream(o, oc, i);
 | 
			
		||||
                        break;
 | 
			
		||||
                    }
 | 
			
		||||
                    new_subtitle_stream(o, oc, i);
 | 
			
		||||
                    break;
 | 
			
		||||
                }
 | 
			
		||||
        }
 | 
			
		||||
        /* do something with data? */
 | 
			
		||||
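As an illustration of the subtitle auto-mapping above (an aside, not from the FFmpeg sources): the hunk keys off AVCodecDescriptor property bits to decide whether an input subtitle stream and the output encoder are both text-based or both bitmap-based. The sketch below runs the same property test on an example codec pairing; the helper name and chosen codec IDs are illustrative only.

/* Minimal sketch, assuming only avcodec_descriptor_get() and the
 * AV_CODEC_PROP_TEXT_SUB / AV_CODEC_PROP_BITMAP_SUB property bits. */
#include <stdio.h>
#include <libavcodec/avcodec.h>

static int subtitle_props(enum AVCodecID id)
{
    const AVCodecDescriptor *d = avcodec_descriptor_get(id);
    /* keep only the text/bitmap classification bits */
    return d ? d->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB) : 0;
}

int main(void)
{
    /* Example pairing: SubRip (text) input, ASS (text) output. */
    int in  = subtitle_props(AV_CODEC_ID_SUBRIP);
    int out = subtitle_props(AV_CODEC_ID_ASS);

    if (in & out)
        printf("compatible: both text-based or both bitmap-based\n");
    else
        printf("incompatible: converting would need OCR or rendering\n");
    return 0;
}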
@@ -2000,7 +1937,7 @@ loop_end:
    unused_opts = strip_specifiers(o->g->codec_opts);
    for (i = of->ost_index; i < nb_output_streams; i++) {
        e = NULL;
        while ((e = av_dict_get(output_streams[i]->encoder_opts, "", e,
        while ((e = av_dict_get(output_streams[i]->opts, "", e,
                                AV_DICT_IGNORE_SUFFIX)))
            av_dict_set(&unused_opts, e->key, NULL, 0);
    }
@@ -2010,13 +1947,8 @@ loop_end:
        const AVClass *class = avcodec_get_class();
        const AVOption *option = av_opt_find(&class, e->key, NULL, 0,
                                             AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
        const AVClass *fclass = avformat_get_class();
        const AVOption *foption = av_opt_find(&fclass, e->key, NULL, 0,
                                              AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
        if (!option || foption)
        if (!option)
            continue;

        if (!(option->flags & AV_OPT_FLAG_ENCODING_PARAM)) {
            av_log(NULL, AV_LOG_ERROR, "Codec AVOption %s (%s) specified for "
                   "output file #%d (%s) is not an encoding option.\n", e->key,
@@ -2061,7 +1993,9 @@ loop_end:
        assert_file_overwrite(filename);

    if (o->mux_preload) {
        av_dict_set_int(&of->opts, "preload", o->mux_preload*AV_TIME_BASE, 0);
        uint8_t buf[64];
        snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
        av_dict_set(&of->opts, "preload", buf, 0);
    }
    oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
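As an illustration of the preload change above (an aside, not from the FFmpeg sources): one side of the hunk formats the value with snprintf() before calling av_dict_set(), the other uses av_dict_set_int(), which does the integer-to-string conversion itself. A minimal sketch of the two equivalent calls on a throwaway dictionary; the value 500000 is just an example:

#include <stdio.h>
#include <libavutil/dict.h>

int main(void)
{
    AVDictionary *opts = NULL;
    char buf[64];

    /* older style: format the number manually, then store the string */
    snprintf(buf, sizeof(buf), "%d", 500000);
    av_dict_set(&opts, "preload", buf, 0);

    /* newer style: let libavutil convert the integer */
    av_dict_set_int(&opts, "preload", 500000, 0);

    printf("preload=%s\n", av_dict_get(opts, "preload", NULL, 0)->value);
    av_dict_free(&opts);
    return 0;
}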
@@ -2114,8 +2048,6 @@ loop_end:
                continue;
            ist = input_streams[output_streams[i]->source_index];
            av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
            if (!output_streams[i]->stream_copy)
                av_dict_set(&output_streams[i]->st->metadata, "encoder", NULL, 0);
        }

    /* process manually set metadata */
@@ -2187,8 +2119,7 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
            for (j = 0; j < nb_input_files; j++) {
                for (i = 0; i < input_files[j]->nb_streams; i++) {
                    AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
                    if (c->codec_type != AVMEDIA_TYPE_VIDEO ||
                        !c->time_base.num)
                    if (c->codec_type != AVMEDIA_TYPE_VIDEO)
                        continue;
                    fr = c->time_base.den * 1000 / c->time_base.num;
                    if (fr == 25000) {
@@ -2221,19 +2152,19 @@ static int opt_target(void *optctx, const char *opt, const char *arg)

        parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
        parse_option(o, "r", frame_rates[norm], options);
        opt_default(NULL, "g", norm == PAL ? "15" : "18");
        av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);

        opt_default(NULL, "b:v", "1150000");
        opt_default(NULL, "maxrate", "1150000");
        opt_default(NULL, "minrate", "1150000");
        opt_default(NULL, "bufsize", "327680"); // 40*1024*8;
        av_dict_set(&o->g->codec_opts, "b:v", "1150000", AV_DICT_DONT_OVERWRITE);
        av_dict_set(&o->g->codec_opts, "maxrate", "1150000", AV_DICT_DONT_OVERWRITE);
        av_dict_set(&o->g->codec_opts, "minrate", "1150000", AV_DICT_DONT_OVERWRITE);
        av_dict_set(&o->g->codec_opts, "bufsize", "327680", AV_DICT_DONT_OVERWRITE); // 40*1024*8;

        opt_default(NULL, "b:a", "224000");
        av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
        parse_option(o, "ar", "44100", options);
        parse_option(o, "ac", "2", options);

        opt_default(NULL, "packetsize", "2324");
        opt_default(NULL, "muxrate", "1411200"); // 2352 * 75 * 8;
        av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
        av_dict_set(&o->g->format_opts, "muxrate", "1411200", AV_DICT_DONT_OVERWRITE); // 2352 * 75 * 8;

        /* We have to offset the PTS, so that it is consistent with the SCR.
           SCR starts at 36000, but the first two packs contain only padding
@@ -2250,18 +2181,18 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
        parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
        parse_option(o, "r", frame_rates[norm], options);
        parse_option(o, "pix_fmt", "yuv420p", options);
        opt_default(NULL, "g", norm == PAL ? "15" : "18");
        av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);

        opt_default(NULL, "b:v", "2040000");
        opt_default(NULL, "maxrate", "2516000");
        opt_default(NULL, "minrate", "0"); // 1145000;
        opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
        opt_default(NULL, "scan_offset", "1");
        av_dict_set(&o->g->codec_opts, "b:v", "2040000", AV_DICT_DONT_OVERWRITE);
        av_dict_set(&o->g->codec_opts, "maxrate", "2516000", AV_DICT_DONT_OVERWRITE);
        av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1145000;
        av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
        av_dict_set(&o->g->codec_opts, "scan_offset", "1", AV_DICT_DONT_OVERWRITE);

        opt_default(NULL, "b:a", "224000");
        av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
        parse_option(o, "ar", "44100", options);

        opt_default(NULL, "packetsize", "2324");
        av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);

    } else if (!strcmp(arg, "dvd")) {

@@ -2272,17 +2203,17 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
        parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
        parse_option(o, "r", frame_rates[norm], options);
        parse_option(o, "pix_fmt", "yuv420p", options);
        opt_default(NULL, "g", norm == PAL ? "15" : "18");
        av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);

        opt_default(NULL, "b:v", "6000000");
        opt_default(NULL, "maxrate", "9000000");
        opt_default(NULL, "minrate", "0"); // 1500000;
        opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
        av_dict_set(&o->g->codec_opts, "b:v", "6000000", AV_DICT_DONT_OVERWRITE);
        av_dict_set(&o->g->codec_opts, "maxrate", "9000000", AV_DICT_DONT_OVERWRITE);
        av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1500000;
        av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;

        opt_default(NULL, "packetsize", "2048");  // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
        opt_default(NULL, "muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
        av_dict_set(&o->g->format_opts, "packetsize", "2048", AV_DICT_DONT_OVERWRITE);  // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
        av_dict_set(&o->g->format_opts, "muxrate", "10080000", AV_DICT_DONT_OVERWRITE); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8

        opt_default(NULL, "b:a", "448000");
        av_dict_set(&o->g->codec_opts, "b:a", "448000", AV_DICT_DONT_OVERWRITE);
        parse_option(o, "ar", "48000", options);

    } else if (!strncmp(arg, "dv", 2)) {
@@ -2301,10 +2232,6 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
        av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
        return AVERROR(EINVAL);
    }

    av_dict_copy(&o->g->codec_opts,  codec_opts, AV_DICT_DONT_OVERWRITE);
    av_dict_copy(&o->g->format_opts, format_opts, AV_DICT_DONT_OVERWRITE);

    return 0;
}
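As an illustration of the -target preset hunks above (an aside, not from the FFmpeg sources): the preset values are written with AV_DICT_DONT_OVERWRITE, so anything the user already placed in codec_opts or format_opts wins over the preset defaults. A minimal sketch of that precedence rule on a throwaway dictionary; the option names and values are examples only:

#include <stdio.h>
#include <libavutil/dict.h>

int main(void)
{
    AVDictionary *codec_opts = NULL;

    av_dict_set(&codec_opts, "b:v", "8000000", 0);                      /* explicit user option */
    av_dict_set(&codec_opts, "b:v", "6000000", AV_DICT_DONT_OVERWRITE); /* preset default: ignored */
    av_dict_set(&codec_opts, "g",   "15",      AV_DICT_DONT_OVERWRITE); /* no prior value: stored */

    printf("b:v=%s g=%s\n",
           av_dict_get(codec_opts, "b:v", NULL, 0)->value,   /* prints 8000000 */
           av_dict_get(codec_opts, "g",   NULL, 0)->value);  /* prints 15 */

    av_dict_free(&codec_opts);
    return 0;
}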
@@ -2424,11 +2351,7 @@ static int opt_old2new(void *optctx, const char *opt, const char *arg)
static int opt_bitrate(void *optctx, const char *opt, const char *arg)
{
    OptionsContext *o = optctx;

    if(!strcmp(opt, "ab")){
        av_dict_set(&o->g->codec_opts, "b:a", arg, 0);
        return 0;
    } else if(!strcmp(opt, "b")){
    if(!strcmp(opt, "b")){
        av_log(NULL, AV_LOG_WARNING, "Please use -b:a or -b:v, -b is ambiguous\n");
        av_dict_set(&o->g->codec_opts, "b:v", arg, 0);
        return 0;
@@ -2892,9 +2815,6 @@ const OptionDef options[] = {
        "print timestamp debugging info" },
    { "max_error_rate",  HAS_ARG | OPT_FLOAT,                        { &max_error_rate },
        "maximum error rate", "ratio of errors (0.0: no errors, 1.0: 100% errors) above which ffmpeg returns an error instead of success." },
    { "discard",        OPT_STRING | HAS_ARG | OPT_SPEC |
                        OPT_INPUT,                                   { .off = OFFSET(discard) },
        "discard", "" },

    /* video options */
    { "vframes",      OPT_VIDEO | HAS_ARG  | OPT_PERFILE | OPT_OUTPUT,           { .func_arg = opt_video_frames },
@@ -2917,6 +2837,8 @@ const OptionDef options[] = {
        "deprecated use -g 1" },
    { "vn",           OPT_VIDEO | OPT_BOOL  | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
        "disable video" },
    { "vdt",          OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT ,               { &video_discard },
        "discard threshold", "n" },
    { "rc_override",  OPT_VIDEO | HAS_ARG | OPT_EXPERT  | OPT_STRING | OPT_SPEC |
                      OPT_OUTPUT,                                                { .off = OFFSET(rc_overrides) },
        "rate control override for specific intervals", "override" },
@@ -2956,6 +2878,8 @@ const OptionDef options[] = {
    { "top",          OPT_VIDEO | HAS_ARG | OPT_EXPERT  | OPT_INT| OPT_SPEC |
                      OPT_INPUT | OPT_OUTPUT,                                    { .off = OFFSET(top_field_first) },
        "top=1/bottom=0/auto=-1 field first", "" },
    { "dc",           OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT ,               { &intra_dc_precision },
        "intra_dc_precision", "precision" },
    { "vtag",         OPT_VIDEO | HAS_ARG | OPT_EXPERT  | OPT_PERFILE |
                      OPT_OUTPUT,                                                { .func_arg = opt_old2new },
        "force video tag/fourcc", "fourcc/tag" },
@@ -2970,8 +2894,6 @@ const OptionDef options[] = {
    { "force_key_frames", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
                          OPT_SPEC | OPT_OUTPUT,                                 { .off = OFFSET(forced_key_frames) },
        "force key frames at specified timestamps", "timestamps" },
    { "ab",           OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT,            { .func_arg = opt_bitrate },
        "audio bitrate (please use -b:a)", "bitrate" },
    { "b",            OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT,            { .func_arg = opt_bitrate },
        "video bitrate (please use -b:v)", "bitrate" },
    { "hwaccel",          OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |

134	ffmpeg_vda.c
@@ -1,134 +0,0 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/avcodec.h"
#include "libavcodec/vda.h"
#include "libavutil/imgutils.h"

#include "ffmpeg.h"

typedef struct VDAContext {
    AVFrame *tmp_frame;
} VDAContext;

static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VDAContext  *vda = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;

    av_frame_unref(vda->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Unsupported pixel format: %u\n", pixel_format);
        return AVERROR(ENOSYS);
    }

    vda->tmp_frame->width  = frame->width;
    vda->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vda->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
                  data, linesize, vda->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vda->tmp_frame, frame);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vda->tmp_frame);

    return 0;
}

static void vda_uninit(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    VDAContext  *vda = ist->hwaccel_ctx;

    ist->hwaccel_uninit        = NULL;
    ist->hwaccel_retrieve_data = NULL;

    av_frame_free(&vda->tmp_frame);

    av_vda_default_free(s);
    av_freep(&ist->hwaccel_ctx);
}

int vda_init(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    VDAContext *vda;
    int ret;

    vda = av_mallocz(sizeof(*vda));
    if (!vda)
        return AVERROR(ENOMEM);

    ist->hwaccel_ctx           = vda;
    ist->hwaccel_uninit        = vda_uninit;
    ist->hwaccel_retrieve_data = vda_retrieve_data;

    vda->tmp_frame = av_frame_alloc();
    if (!vda->tmp_frame) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = av_vda_default_init(s);
    if (ret < 0) {
        av_log(NULL, loglevel, "Error creating VDA decoder.\n");
        goto fail;
    }

    return 0;
fail:
    vda_uninit(s);
    return ret;
}

252	ffplay.c
@@ -67,13 +67,12 @@ const int program_birth_year = 2003;
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_FRAMES 5

/* Minimum SDL audio buffer size, in samples. */
#define SDL_AUDIO_MIN_BUFFER_SIZE 512
/* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the minimum AV sync threshold */
#define AV_SYNC_THRESHOLD_MIN 0.04
#define AV_SYNC_THRESHOLD_MIN 0.01
/* AV sync correction is done if above the maximum AV sync threshold */
#define AV_SYNC_THRESHOLD_MAX 0.1
/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
@@ -120,7 +119,7 @@ typedef struct PacketQueue {
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 3
#define SUBPICTURE_QUEUE_SIZE 16
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;             // presentation timestamp for this picture
@@ -203,7 +202,7 @@ typedef struct VideoState {
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE];
    uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
    uint8_t *audio_buf;
    uint8_t *audio_buf1;
    unsigned int audio_buf_size; /* in bytes */
@@ -256,7 +255,7 @@ typedef struct VideoState {
    int64_t video_current_pos;      // current displayed file pos
    double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex, pictq_rindex_shown;
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
@@ -269,7 +268,6 @@ typedef struct VideoState {
    int step;

#if CONFIG_AVFILTER
    int vfilter_idx;
    AVFilterContext *in_video_filter;   // the first filter in the video chain
    AVFilterContext *out_video_filter;  // the last filter in the video chain
    AVFilterContext *in_audio_filter;   // the first filter in the audio chain
@@ -310,6 +308,7 @@ static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int error_concealment = 3;
static int decoder_reorder_pts = -1;
static int autoexit;
static int exit_on_keydown;
@@ -325,11 +324,9 @@ double rdftspeed = 0.02;
static int64_t cursor_last_shown;
static int cursor_hidden = 0;
#if CONFIG_AVFILTER
static const char **vfilters_list = NULL;
static int nb_vfilters = 0;
static char *vfilters = NULL;
static char *afilters = NULL;
#endif
static int autorotate = 1;

/* current context */
static int is_full_screen;
@@ -342,15 +339,6 @@ static AVPacket flush_pkt;

static SDL_Surface *screen;

#if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
{
    GROW_ARRAY(vfilters_list, nb_vfilters);
    vfilters_list[nb_vfilters - 1] = arg;
    return 0;
}
#endif

static inline
int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
                   enum AVSampleFormat fmt2, int64_t channel_count2)
@@ -441,7 +429,7 @@ static void packet_queue_flush(PacketQueue *q)
    MyAVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt; pkt = pkt1) {
    for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
@@ -808,21 +796,19 @@ static void free_subpicture(SubPicture *sp)
    avsubtitle_free(&sp->sub);
}

static void calculate_display_rect(SDL_Rect *rect,
                                   int scr_xleft, int scr_ytop, int scr_width, int scr_height,
                                   int pic_width, int pic_height, AVRational pic_sar)
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
{
    float aspect_ratio;
    int width, height, x, y;

    if (pic_sar.num == 0)
    if (vp->sar.num == 0)
        aspect_ratio = 0;
    else
        aspect_ratio = av_q2d(pic_sar);
        aspect_ratio = av_q2d(vp->sar);

    if (aspect_ratio <= 0.0)
        aspect_ratio = 1.0;
    aspect_ratio *= (float)pic_width / (float)pic_height;
    aspect_ratio *= (float)vp->width / (float)vp->height;

    /* XXX: we suppose the screen has a 1.0 pixel ratio */
    height = scr_height;
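As an illustration of calculate_display_rect() above (an aside, not from the FFmpeg sources): both variants of the function do the same letterboxing math, only the parameter passing changes. A stripped-down sketch of that computation without SDL; the function and variable names are hypothetical:

#include <stdio.h>

/* Fit a pic_w x pic_h picture with sample aspect ratio sar_num/sar_den into
 * scr_w x scr_h while preserving the display aspect ratio. */
static void fit_rect(int scr_w, int scr_h, int pic_w, int pic_h,
                     int sar_num, int sar_den, int *out_w, int *out_h)
{
    double aspect = (sar_num > 0 && sar_den > 0) ? (double)sar_num / sar_den : 1.0;
    aspect *= (double)pic_w / pic_h;            /* display aspect ratio */

    int h = scr_h;
    int w = ((int)(h * aspect + 0.5)) & ~1;     /* round and force an even width */
    if (w > scr_w) {                            /* too wide: fit to the width instead */
        w = scr_w;
        h = ((int)(w / aspect + 0.5)) & ~1;
    }
    *out_w = w;
    *out_h = h;
}

int main(void)
{
    int w, h;
    fit_rect(1280, 720, 720, 576, 16, 15, &w, &h);  /* PAL DVD example: prints 960x720 */
    printf("%dx%d\n", w, h);
    return 0;
}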
@@ -847,7 +833,7 @@ static void video_image_display(VideoState *is)
    SDL_Rect rect;
    int i;

    vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        if (is->subtitle_st) {
            if (is->subpq_size > 0) {
@@ -873,7 +859,7 @@ static void video_image_display(VideoState *is)
            }
        }

        calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
        calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);

        SDL_DisplayYUVOverlay(vp->bmp, &rect);

@@ -913,7 +899,7 @@ static void video_audio_display(VideoState *s)
        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime_relative() - audio_callback_time;
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_tgt.freq) / 1000000;
        }

@@ -991,7 +977,7 @@ static void video_audio_display(VideoState *s)
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits = rdft_bits;
            s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
            s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
@@ -1065,7 +1051,7 @@ static void do_exit(VideoState *is)
    av_lockmgr_register(NULL);
    uninit_opts();
#if CONFIG_AVFILTER
    av_freep(&vfilters_list);
    av_freep(&vfilters);
#endif
    avformat_network_deinit();
    if (show_status)
@@ -1080,10 +1066,10 @@ static void sigterm_handler(int sig)
    exit(123);
}

static void set_default_window_size(int width, int height, AVRational sar)
static void set_default_window_size(VideoPicture *vp)
{
    SDL_Rect rect;
    calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
    calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
    default_width  = rect.w;
    default_height = rect.h;
}
@@ -1097,7 +1083,7 @@ static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp
    else                flags |= SDL_RESIZABLE;

    if (vp && vp->width)
        set_default_window_size(vp->width, vp->height, vp->sar);
        set_default_window_size(vp);

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
@@ -1146,7 +1132,7 @@ static double get_clock(Clock *c)
    if (c->paused) {
        return c->pts;
    } else {
        double time = av_gettime_relative() / 1000000.0;
        double time = av_gettime() / 1000000.0;
        return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
    }
}
@@ -1161,7 +1147,7 @@ static void set_clock_at(Clock *c, double pts, int serial, double time)

static void set_clock(Clock *c, double pts, int serial)
{
    double time = av_gettime_relative() / 1000000.0;
    double time = av_gettime() / 1000000.0;
    set_clock_at(c, pts, serial, time);
}
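As an illustration of the clock-source change that runs through these hunks (an aside, not from the FFmpeg sources): one side of each pair reads av_gettime(), which returns wall-clock microseconds, the other reads av_gettime_relative(), a monotonic clock that is not affected by NTP or manual clock changes, which is what a playback clock wants. A minimal sketch of timing an interval with the monotonic variant:

#include <stdio.h>
#include <libavutil/time.h>

int main(void)
{
    int64_t t0 = av_gettime_relative();   /* monotonic microseconds */
    av_usleep(100000);                    /* sleep roughly 100 ms */
    int64_t t1 = av_gettime_relative();

    printf("elapsed: %.3f s\n", (t1 - t0) / 1000000.0);
    return 0;
}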
@@ -1254,7 +1240,7 @@ static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_by
static void stream_toggle_pause(VideoState *is)
{
    if (is->paused) {
        is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
        is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->vidclk.paused = 0;
        }
@@ -1320,23 +1306,7 @@ static double vp_duration(VideoState *is, VideoPicture *vp, VideoPicture *nextvp
    }
}

/* return the number of undisplayed pictures in the queue */
static int pictq_nb_remaining(VideoState *is) {
    return is->pictq_size - is->pictq_rindex_shown;
}

/* jump back to the previous picture if available by resetting rindex_shown */
static int pictq_prev_picture(VideoState *is) {
    int ret = is->pictq_rindex_shown;
    is->pictq_rindex_shown = 0;
    return ret;
}

static void pictq_next_picture(VideoState *is) {
    if (!is->pictq_rindex_shown) {
        is->pictq_rindex_shown = 1;
        return;
    }
    /* update queue size and signal for next picture */
    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
        is->pictq_rindex = 0;
@@ -1347,6 +1317,25 @@ static void pictq_next_picture(VideoState *is) {
    SDL_UnlockMutex(is->pictq_mutex);
}

static int pictq_prev_picture(VideoState *is) {
    VideoPicture *prevvp;
    int ret = 0;
    /* update queue size and signal for the previous picture */
    prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
    if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
        SDL_LockMutex(is->pictq_mutex);
        if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) {
            if (--is->pictq_rindex == -1)
                is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
            is->pictq_size++;
            ret = 1;
        }
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return ret;
}

static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
    /* update current video pts */
    set_clock(&is->vidclk, pts, serial);
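As an illustration of the pictq_* change above (an aside, not from the FFmpeg sources): the rindex_shown-based helpers keep the frame that is currently on screen inside the ring buffer, instead of advancing the read index as soon as it is displayed, so it can be redisplayed cheaply. A simplified, self-contained sketch of that bookkeeping; the struct and function names are hypothetical and locking is omitted:

#include <stdio.h>

#define QUEUE_SIZE 3

typedef struct FrameQueue {
    int size;          /* queued entries, including the one being shown */
    int rindex;        /* read position */
    int rindex_shown;  /* 1 once the frame at rindex has been displayed */
} FrameQueue;

static int nb_remaining(const FrameQueue *q)
{
    return q->size - q->rindex_shown;          /* undisplayed frames only */
}

static void next_frame(FrameQueue *q)
{
    if (!q->rindex_shown) {                    /* first display: just mark it shown */
        q->rindex_shown = 1;
        return;
    }
    q->rindex = (q->rindex + 1) % QUEUE_SIZE;  /* now drop the previously shown frame */
    q->size--;
}

int main(void)
{
    FrameQueue q = { .size = 2 };
    printf("remaining=%d\n", nb_remaining(&q));           /* 2 */
    next_frame(&q);                                       /* frame 0 is now on screen */
    printf("remaining=%d\n", nb_remaining(&q));           /* 1 */
    next_frame(&q);                                       /* advance, frame 0 discarded */
    printf("remaining=%d size=%d\n", nb_remaining(&q), q.size);
    return 0;
}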
@@ -1366,7 +1355,7 @@ static void video_refresh(void *opaque, double *remaining_time)
        check_external_clock_speed(is);

    if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
        time = av_gettime_relative() / 1000000.0;
        time = av_gettime() / 1000000.0;
        if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
            video_display(is);
            is->last_vis_time = time;
@@ -1379,15 +1368,15 @@ static void video_refresh(void *opaque, double *remaining_time)
        if (is->force_refresh)
            redisplay = pictq_prev_picture(is);
retry:
        if (pictq_nb_remaining(is) == 0) {
        if (is->pictq_size == 0) {
            // nothing to do, no picture to display in the queue
        } else {
            double last_duration, duration, delay;
            VideoPicture *vp, *lastvp;

            /* dequeue the picture */
            lastvp = &is->pictq[is->pictq_rindex];
            vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
            vp = &is->pictq[is->pictq_rindex];
            lastvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];

            if (vp->serial != is->videoq.serial) {
                pictq_next_picture(is);
@@ -1397,7 +1386,7 @@ retry:
            }

            if (lastvp->serial != vp->serial && !redisplay)
                is->frame_timer = av_gettime_relative() / 1000000.0;
                is->frame_timer = av_gettime() / 1000000.0;

            if (is->paused)
                goto display;
@@ -1409,7 +1398,7 @@ retry:
            else
                delay = compute_target_delay(last_duration, is);

            time= av_gettime_relative()/1000000.0;
            time= av_gettime()/1000000.0;
            if (time < is->frame_timer + delay && !redisplay) {
                *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
                return;
@@ -1424,8 +1413,8 @@ retry:
                update_video_pts(is, vp->pts, vp->pos, vp->serial);
            SDL_UnlockMutex(is->pictq_mutex);

            if (pictq_nb_remaining(is) > 1) {
                VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown + 1) % VIDEO_PICTURE_QUEUE_SIZE];
            if (is->pictq_size > 1) {
                VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
                duration = vp_duration(is, vp, nextvp);
                if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
                    if (!redisplay)
@@ -1483,7 +1472,7 @@ display:
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime_relative();
        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
@@ -1581,7 +1570,8 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
    /* keep the last already displayed picture in the queue */
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
@@ -1652,7 +1642,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, src_frame->format, vp->width, vp->height,
            AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
        if (!is->img_convert_ctx) {
        if (is->img_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
            exit(1);
        }
@@ -1783,7 +1773,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
    char sws_flags_str[128];
    char buffersrc_args[256];
    int ret;
    AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
    AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
    AVCodecContext *codec = is->video_st->codec;
    AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);

@@ -1814,49 +1804,16 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
    if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts,  AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;

    last_filter = filt_out;

/* Note: this macro adds a filter before the lastly added filter, so the
 * processing order of the filters is in reverse */
#define INSERT_FILT(name, arg) do {                                         \
    AVFilterContext *filt_ctx;                                              \
                                                                            \
    ret = avfilter_graph_create_filter(&filt_ctx,                           \
                                       avfilter_get_by_name(name),          \
                                       "ffplay_" name, arg, NULL, graph);   \
    if (ret < 0)                                                            \
        goto fail;                                                          \
                                                                            \
    ret = avfilter_link(filt_ctx, 0, last_filter, 0);                       \
    if (ret < 0)                                                            \
        goto fail;                                                          \
                                                                            \
    last_filter = filt_ctx;                                                 \
} while (0)

    /* SDL YUV code is not handling odd width/height for some driver
     * combinations, therefore we crop the picture to an even width/height. */
    INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
    if ((ret = avfilter_graph_create_filter(&filt_crop,
                                            avfilter_get_by_name("crop"),
                                            "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
        goto fail;
    if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
        goto fail;

    if (autorotate) {
        AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
        if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
            if (!strcmp(rotate_tag->value, "90")) {
                INSERT_FILT("transpose", "clock");
            } else if (!strcmp(rotate_tag->value, "180")) {
                INSERT_FILT("hflip", NULL);
                INSERT_FILT("vflip", NULL);
            } else if (!strcmp(rotate_tag->value, "270")) {
                INSERT_FILT("transpose", "cclock");
            } else {
                char rotate_buf[64];
                snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
                INSERT_FILT("rotate", rotate_buf);
            }
        }
    }

    if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
    if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
        goto fail;

    is->in_video_filter  = filt_src;
@@ -1962,7 +1919,6 @@ static int video_thread(void *arg)
    int last_h = 0;
    enum AVPixelFormat last_format = -2;
    int last_serial = -1;
    int last_vfilter_idx = 0;
#endif

    for (;;) {
@@ -1981,8 +1937,7 @@ static int video_thread(void *arg)
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != serial
            || last_vfilter_idx != is->vfilter_idx) {
            || last_serial != serial) {
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
                   last_w, last_h,
@@ -1991,7 +1946,7 @@ static int video_thread(void *arg)
                   (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
            if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
@@ -2004,7 +1959,6 @@ static int video_thread(void *arg)
            last_h = frame->height;
            last_format = frame->format;
            last_serial = serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = filt_out->inputs[0]->frame_rate;
        }

@@ -2013,7 +1967,7 @@ static int video_thread(void *arg)
            goto the_end;

        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
            is->frame_last_returned_time = av_gettime() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
@@ -2023,7 +1977,7 @@ retry:
                break;
            }

            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = filt_out->inputs[0]->time_base;
@@ -2424,7 +2378,7 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
    VideoState *is = opaque;
    int audio_size, len1;

    audio_callback_time = av_gettime_relative();
    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
@@ -2461,8 +2415,6 @@ static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb
    SDL_AudioSpec wanted_spec, spec;
    const char *env;
    static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
    static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
    int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;

    env = SDL_getenv("SDL_AUDIO_CHANNELS");
    if (env) {
@@ -2473,32 +2425,24 @@ static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }
    wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
    wanted_spec.channels = wanted_nb_channels;
    wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
    wanted_spec.freq = wanted_sample_rate;
    if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
        av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
        return -1;
    }
    while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
        next_sample_rate_idx--;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.silence = 0;
    wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = sdl_audio_callback;
    wanted_spec.userdata = opaque;
    while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
        av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
               wanted_spec.channels, wanted_spec.freq, SDL_GetError());
        av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
        if (!wanted_spec.channels) {
            wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
            wanted_spec.channels = wanted_nb_channels;
            if (!wanted_spec.freq) {
                av_log(NULL, AV_LOG_ERROR,
                       "No more combinations to try, audio open failed\n");
                return -1;
            }
            av_log(NULL, AV_LOG_ERROR,
                   "No more channel combinations to try, audio open failed\n");
            return -1;
        }
        wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
    }
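As an illustration of the wanted_spec.samples change above (an aside, not from the FFmpeg sources): the new formula picks a power-of-two buffer no smaller than 512 samples and large enough that SDL fires at most about SDL_AUDIO_MAX_CALLBACKS_PER_SEC callbacks per second, instead of the fixed 1024-sample buffer. A worked sketch of that arithmetic; a local integer log2 helper stands in for av_log2 so the example is self-contained:

#include <stdio.h>

#define MIN_BUFFER_SIZE 512
#define MAX_CALLBACKS_PER_SEC 30

static int ilog2(unsigned v)        /* position of the highest set bit, like av_log2 */
{
    int n = 0;
    while (v >>= 1)
        n++;
    return n;
}

int main(void)
{
    int freqs[] = { 8000, 44100, 48000, 192000 };
    for (int i = 0; i < 4; i++) {
        int freq    = freqs[i];
        int samples = 2 << ilog2(freq / MAX_CALLBACKS_PER_SEC);  /* next power of two */
        if (samples < MIN_BUFFER_SIZE)
            samples = MIN_BUFFER_SIZE;
        printf("%6d Hz -> %5d samples (~%.1f callbacks/s)\n",
               freq, samples, (double)freq / samples);
        /* 44100 Hz gives 2048 samples, about 21.5 callbacks per second */
    }
    return 0;
}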
@@ -2572,6 +2516,7 @@ static int stream_component_open(VideoState *is, int stream_index)
        stream_lowres = av_codec_get_max_lowres(codec);
    }
    av_codec_set_lowres(avctx, stream_lowres);
    avctx->error_concealment = error_concealment;

    if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
@@ -2582,7 +2527,7 @@ static int stream_component_open(VideoState *is, int stream_index)
    if (!av_dict_get(opts, "threads", NULL, 0))
        av_dict_set(&opts, "threads", "auto", 0);
    if (stream_lowres)
        av_dict_set_int(&opts, "lowres", stream_lowres, 0);
        av_dict_set(&opts, "lowres", av_asprintf("%d", stream_lowres), AV_DICT_DONT_STRDUP_VAL);
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
        av_dict_set(&opts, "refcounted_frames", "1", 0);
    if (avcodec_open2(avctx, codec, &opts) < 0)
@@ -2629,7 +2574,7 @@ static int stream_component_open(VideoState *is, int stream_index)
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
        is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
@@ -2807,8 +2752,6 @@ static int read_thread(void *arg)
    if (genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    av_format_inject_global_side_data(ic);

    opts = setup_find_stream_info_opts(ic, codec_opts);
    orig_nb_streams = ic->nb_streams;

@@ -2824,7 +2767,7 @@ static int read_thread(void *arg)
    av_freep(&opts);

    if (ic->pb)
        ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
        ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if (seek_by_bytes < 0)
        seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
@@ -2879,9 +2822,9 @@ static int read_thread(void *arg)
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
        AVCodecContext *avctx = st->codec;
        AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
        if (avctx->width)
            set_default_window_size(avctx->width, avctx->height, sar);
        VideoPicture vp = {.width = avctx->width, .height = avctx->height, .sar = av_guess_sample_aspect_ratio(ic, st, NULL)};
        if (vp.width)
            set_default_window_size(&vp);
    }

    /* open the streams */
@@ -2992,7 +2935,7 @@ static int read_thread(void *arg)
        }
        if (!is->paused &&
            (!is->audio_st || is->audio_finished == is->audioq.serial) &&
            (!is->video_st || (is->video_finished == is->videoq.serial && pictq_nb_remaining(is) == 0))) {
            (!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) {
            if (loop != 1 && (!loop || --loop)) {
                stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
            } else if (autoexit) {
@@ -3013,7 +2956,7 @@ static int read_thread(void *arg)
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || avio_feof(ic->pb))
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof = 1;
            if (ic->pb && ic->pb->error)
                break;
@@ -3177,11 +3120,6 @@ static void stream_cycle_channel(VideoState *is, int codec_type)
 the_end:
    if (p && stream_index != -1)
        stream_index = p->stream_index[stream_index];
    av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
           av_get_media_type_string(codec_type),
 | 
			
		||||
           old_index,
 | 
			
		||||
           stream_index);
 | 
			
		||||
 | 
			
		||||
    stream_component_close(is, old_index);
 | 
			
		||||
    stream_component_open(is, stream_index);
 | 
			
		||||
}
 | 
			
		||||
@@ -3219,7 +3157,7 @@ static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
 | 
			
		||||
    double remaining_time = 0.0;
 | 
			
		||||
    SDL_PumpEvents();
 | 
			
		||||
    while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
 | 
			
		||||
        if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
 | 
			
		||||
        if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
 | 
			
		||||
            SDL_ShowCursor(0);
 | 
			
		||||
            cursor_hidden = 1;
 | 
			
		||||
        }
 | 
			
		||||
@@ -3305,17 +3243,7 @@ static void event_loop(VideoState *cur_stream)
 | 
			
		||||
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
 | 
			
		||||
                break;
 | 
			
		||||
            case SDLK_w:
 | 
			
		||||
#if CONFIG_AVFILTER
 | 
			
		||||
                if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
 | 
			
		||||
                    if (++cur_stream->vfilter_idx >= nb_vfilters)
 | 
			
		||||
                        cur_stream->vfilter_idx = 0;
 | 
			
		||||
                } else {
 | 
			
		||||
                    cur_stream->vfilter_idx = 0;
 | 
			
		||||
                    toggle_audio_display(cur_stream);
 | 
			
		||||
                }
 | 
			
		||||
#else
 | 
			
		||||
                toggle_audio_display(cur_stream);
 | 
			
		||||
#endif
 | 
			
		||||
                break;
 | 
			
		||||
            case SDLK_PAGEUP:
 | 
			
		||||
                if (cur_stream->ic->nb_chapters <= 1) {
 | 
			
		||||
@@ -3383,7 +3311,7 @@ static void event_loop(VideoState *cur_stream)
 | 
			
		||||
                SDL_ShowCursor(1);
 | 
			
		||||
                cursor_hidden = 0;
 | 
			
		||||
            }
 | 
			
		||||
            cursor_last_shown = av_gettime_relative();
 | 
			
		||||
            cursor_last_shown = av_gettime();
 | 
			
		||||
            if (event.type == SDL_MOUSEBUTTONDOWN) {
 | 
			
		||||
                x = event.button.x;
 | 
			
		||||
            } else {
 | 
			
		||||
@@ -3571,6 +3499,7 @@ static const OptionDef options[] = {
 | 
			
		||||
    { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
 | 
			
		||||
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
 | 
			
		||||
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
 | 
			
		||||
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
 | 
			
		||||
    { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
 | 
			
		||||
    { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
 | 
			
		||||
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
 | 
			
		||||
@@ -3580,7 +3509,7 @@ static const OptionDef options[] = {
 | 
			
		||||
    { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
 | 
			
		||||
    { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
 | 
			
		||||
#if CONFIG_AVFILTER
 | 
			
		||||
    { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
 | 
			
		||||
    { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
 | 
			
		||||
    { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
 | 
			
		||||
#endif
 | 
			
		||||
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
 | 
			
		||||
@@ -3591,7 +3520,6 @@ static const OptionDef options[] = {
 | 
			
		||||
    { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &audio_codec_name }, "force audio decoder",    "decoder_name" },
 | 
			
		||||
    { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
 | 
			
		||||
    { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &video_codec_name }, "force video decoder",    "decoder_name" },
 | 
			
		||||
    { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
 | 
			
		||||
    { NULL, },
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
@@ -3624,7 +3552,7 @@ void show_help_default(const char *opt, const char *arg)
 | 
			
		||||
           "v                   cycle video channel\n"
 | 
			
		||||
           "t                   cycle subtitle channel in the current program\n"
 | 
			
		||||
           "c                   cycle program\n"
 | 
			
		||||
           "w                   cycle video filters or show modes\n"
 | 
			
		||||
           "w                   show audio waves\n"
 | 
			
		||||
           "s                   activate frame-step mode\n"
 | 
			
		||||
           "left/right          seek backward/forward 10 seconds\n"
 | 
			
		||||
           "down/up             seek backward/forward 1 minute\n"
 | 
			
		||||
@@ -3697,7 +3625,7 @@ int main(int argc, char **argv)
 | 
			
		||||
        flags &= ~SDL_INIT_AUDIO;
 | 
			
		||||
    if (display_disable)
 | 
			
		||||
        SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
 | 
			
		||||
#if !defined(_WIN32) && !defined(__APPLE__)
 | 
			
		||||
#if !defined(__MINGW32__) && !defined(__APPLE__)
 | 
			
		||||
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
 | 
			
		||||
#endif
 | 
			
		||||
    if (SDL_Init (flags)) {
 | 
			
		||||
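[Aside on the audio_open() hunk above: ffplay keeps retrying SDL_OpenAudio() with progressively fewer channels and, once those are exhausted, lower sample rates, until SDL accepts a spec. The following is a minimal standalone sketch of that retry pattern, not ffplay's actual code: the fallback tables, the silent callback and the function name are illustrative, and it assumes SDL_Init(SDL_INIT_AUDIO) has already succeeded.]

/* Sketch of the fallback loop shown in the hunk above (illustrative values). */
#include <SDL.h>
#include <stdio.h>
#include <string.h>

static void silent_callback(void *opaque, Uint8 *stream, int len)
{
    memset(stream, 0, len);            /* a real player would mix audio here */
}

static int open_audio_with_fallback(int wanted_freq, int wanted_channels)
{
    static const int next_nb_channels[]  = { 0, 0, 1, 6, 2, 6, 4, 6 };
    static const int next_sample_rates[] = { 0, 44100, 48000, 96000, 192000 };
    int rate_idx = sizeof(next_sample_rates) / sizeof(next_sample_rates[0]) - 1;
    SDL_AudioSpec wanted, obtained;

    while (rate_idx && next_sample_rates[rate_idx] >= wanted_freq)
        rate_idx--;

    wanted.freq     = wanted_freq;
    wanted.format   = AUDIO_S16SYS;
    wanted.channels = wanted_channels;
    wanted.silence  = 0;
    wanted.samples  = 1024;
    wanted.callback = silent_callback;
    wanted.userdata = NULL;

    while (SDL_OpenAudio(&wanted, &obtained) < 0) {
        fprintf(stderr, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
                wanted.channels, wanted.freq, SDL_GetError());
        /* first try fewer channels at the same rate ... */
        wanted.channels = next_nb_channels[wanted.channels < 7 ? wanted.channels : 7];
        if (!wanted.channels) {
            /* ... then drop to the next lower sample rate and start over */
            wanted.freq     = next_sample_rates[rate_idx--];
            wanted.channels = wanted_channels;
            if (!wanted.freq)
                return -1;             /* no combination left to try */
        }
    }
    return obtained.freq;              /* the rate SDL actually granted */
}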
 
ffprobe.c
@@ -33,7 +33,6 @@
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/hash.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/dict.h"
@@ -81,7 +80,6 @@ static int show_private_data            = 1;

static char *print_format;
static char *stream_specifier;
static char *show_data_hash;

typedef struct {
    int id;             ///< identifier
@@ -125,8 +123,6 @@ typedef enum {
    SECTION_ID_FRAME,
    SECTION_ID_FRAMES,
    SECTION_ID_FRAME_TAGS,
    SECTION_ID_FRAME_SIDE_DATA_LIST,
    SECTION_ID_FRAME_SIDE_DATA,
    SECTION_ID_LIBRARY_VERSION,
    SECTION_ID_LIBRARY_VERSIONS,
    SECTION_ID_PACKET,
@@ -156,10 +152,8 @@ static struct section sections[] = {
    [SECTION_ID_FORMAT] =             { SECTION_ID_FORMAT, "format", 0, { SECTION_ID_FORMAT_TAGS, -1 } },
    [SECTION_ID_FORMAT_TAGS] =        { SECTION_ID_FORMAT_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "format_tags" },
    [SECTION_ID_FRAMES] =             { SECTION_ID_FRAMES, "frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME, SECTION_ID_SUBTITLE, -1 } },
    [SECTION_ID_FRAME] =              { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, -1 } },
    [SECTION_ID_FRAME] =              { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, -1 } },
    [SECTION_ID_FRAME_TAGS] =         { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
    [SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 } },
    [SECTION_ID_FRAME_SIDE_DATA] =     { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { -1 } },
    [SECTION_ID_LIBRARY_VERSIONS] =   { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
    [SECTION_ID_LIBRARY_VERSION] =    { SECTION_ID_LIBRARY_VERSION, "library_version", 0, { -1 } },
    [SECTION_ID_PACKETS] =            { SECTION_ID_PACKETS, "packets", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
@@ -189,8 +183,6 @@ static const OptionDef *options;
static const char *input_filename;
static AVInputFormat *iformat = NULL;

static struct AVHashContext *hash;

static const char *const binary_unit_prefixes [] = { "", "Ki", "Mi", "Gi", "Ti", "Pi" };
static const char *const decimal_unit_prefixes[] = { "", "K" , "M" , "G" , "T" , "P"  };

@@ -689,21 +681,6 @@ static void writer_print_data(WriterContext *wctx, const char *name,
    av_bprint_finalize(&bp, NULL);
}

static void writer_print_data_hash(WriterContext *wctx, const char *name,
                                   uint8_t *data, int size)
{
    char *p, buf[AV_HASH_MAX_SIZE * 2 + 64] = { 0 };

    if (!hash)
        return;
    av_hash_init(hash);
    av_hash_update(hash, data, size);
    snprintf(buf, sizeof(buf), "%s:", av_hash_get_name(hash));
    p = buf + strlen(buf);
    av_hash_final_hex(hash, p, buf + sizeof(buf) - p);
    writer_print_string(wctx, name, buf, 0);
}

#define MAX_REGISTERED_WRITERS_NB 64

static const Writer *registered_writers[MAX_REGISTERED_WRITERS_NB + 1];
@@ -1711,7 +1688,6 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
    print_fmt("flags", "%c",      pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
    if (do_show_data)
        writer_print_data(w, "data", pkt->data, pkt->size);
    writer_print_data_hash(w, "data_hash", pkt->data, pkt->size);
    writer_print_section_footer(w);

    av_bprint_finalize(&pbuf, NULL);
@@ -1746,7 +1722,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
{
    AVBPrint pbuf;
    const char *s;
    int i;

    av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);

@@ -1809,20 +1784,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
    }
    if (do_show_frame_tags)
        show_tags(w, av_frame_get_metadata(frame), SECTION_ID_FRAME_TAGS);
    if (frame->nb_side_data) {
        writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_LIST);
        for (i = 0; i < frame->nb_side_data; i++) {
            AVFrameSideData *sd = frame->side_data[i];
            const char *name;

            writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA);
            name = av_frame_side_data_name(sd->type);
            print_str("side_data_type", name ? name : "unknown");
            print_int("side_data_size", sd->size);
            writer_print_section_footer(w);
        }
        writer_print_section_footer(w);
    }

    writer_print_section_footer(w);

@@ -2030,7 +1991,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
    const char *s;
    AVRational sar, dar;
    AVBPrint pbuf;
    const AVCodecDescriptor *cd;
    int ret = 0;

    av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
@@ -2048,12 +2008,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
                if (dec->long_name) print_str    ("codec_long_name", dec->long_name);
                else                print_str_opt("codec_long_name", "unknown");
            }
        } else if ((cd = avcodec_descriptor_get(stream->codec->codec_id))) {
            print_str_opt("codec_name", cd->name);
            if (!do_bitexact) {
                print_str_opt("codec_long_name",
                              cd->long_name ? cd->long_name : "unknown");
            }
        } else {
            print_str_opt("codec_name", "unknown");
            if (!do_bitexact) {
@@ -2097,13 +2051,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
            if (s) print_str    ("pix_fmt", s);
            else   print_str_opt("pix_fmt", "unknown");
            print_int("level",   dec_ctx->level);
            if (dec_ctx->color_range != AVCOL_RANGE_UNSPECIFIED)
                print_str    ("color_range", dec_ctx->color_range == AVCOL_RANGE_MPEG ? "tv": "pc");
            else
                print_str_opt("color_range", "N/A");
            s = av_get_colorspace_name(dec_ctx->colorspace);
            if (s) print_str    ("color_space", s);
            else   print_str_opt("color_space", "unknown");
            if (dec_ctx->timecode_frame_start >= 0) {
                char tcbuf[AV_TIMECODE_STR_SIZE];
                av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
@@ -2168,10 +2115,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
    print_time("duration",    stream->duration, &stream->time_base);
    if (dec_ctx->bit_rate > 0) print_val    ("bit_rate", dec_ctx->bit_rate, unit_bit_per_second_str);
    else                       print_str_opt("bit_rate", "N/A");
    if (dec_ctx->rc_max_rate > 0) print_val ("max_bit_rate", dec_ctx->rc_max_rate, unit_bit_per_second_str);
    else                       print_str_opt("max_bit_rate", "N/A");
    if (dec_ctx->bits_per_raw_sample > 0) print_fmt("bits_per_raw_sample", "%d", dec_ctx->bits_per_raw_sample);
    else                       print_str_opt("bits_per_raw_sample", "N/A");
    if (stream->nb_frames) print_fmt    ("nb_frames", "%"PRId64, stream->nb_frames);
    else                   print_str_opt("nb_frames", "N/A");
    if (nb_streams_frames[stream_idx])  print_fmt    ("nb_read_frames", "%"PRIu64, nb_streams_frames[stream_idx]);
@@ -2181,8 +2124,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
    if (do_show_data)
        writer_print_data(w, "extradata", dec_ctx->extradata,
                                          dec_ctx->extradata_size);
    writer_print_data_hash(w, "extradata_hash", dec_ctx->extradata,
                                                dec_ctx->extradata_size);

    /* Print disposition information */
#define PRINT_DISPOSITION(flagname, name) do {                                \
@@ -2792,7 +2733,7 @@ static int parse_read_intervals(const char *intervals_spec)
            n++;
    n++;

    read_intervals = av_malloc_array(n, sizeof(*read_intervals));
    read_intervals = av_malloc(n * sizeof(*read_intervals));
    if (!read_intervals) {
        ret = AVERROR(ENOMEM);
        goto end;
@@ -2911,7 +2852,6 @@ static const OptionDef real_options[] = {
    { "select_streams", OPT_STRING | HAS_ARG, {(void*)&stream_specifier}, "select the specified streams", "stream_specifier" },
    { "sections", OPT_EXIT, {.func_arg = opt_sections}, "print sections structure and section information, and exit" },
    { "show_data",    OPT_BOOL, {(void*)&do_show_data}, "show packets data" },
    { "show_data_hash", OPT_STRING | HAS_ARG, {(void*)&show_data_hash}, "show packets data hash" },
    { "show_error",   0, {(void*)&opt_show_error},  "show probing error" },
    { "show_format",  0, {(void*)&opt_show_format}, "show format/container info" },
    { "show_frames",  0, {(void*)&opt_show_frames}, "show frames info" },
@@ -3015,21 +2955,6 @@ int main(int argc, char **argv)
    w_name = av_strtok(print_format, "=", &buf);
    w_args = buf;

    if (show_data_hash) {
        if ((ret = av_hash_alloc(&hash, show_data_hash)) < 0) {
            if (ret == AVERROR(EINVAL)) {
                const char *n;
                av_log(NULL, AV_LOG_ERROR,
                       "Unknown hash algorithm '%s'\nKnown algorithms:",
                       show_data_hash);
                for (i = 0; (n = av_hash_names(i)); i++)
                    av_log(NULL, AV_LOG_ERROR, " %s", n);
                av_log(NULL, AV_LOG_ERROR, "\n");
            }
            goto end;
        }
    }

    w = writer_get_by_name(w_name);
    if (!w) {
        av_log(NULL, AV_LOG_ERROR, "Unknown output format with name '%s'\n", w_name);
@@ -3069,7 +2994,6 @@ int main(int argc, char **argv)
end:
    av_freep(&print_format);
    av_freep(&read_intervals);
    av_hash_freep(&hash);

    uninit_opts();
    for (i = 0; i < FF_ARRAY_ELEMS(sections); i++)
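[Aside on the data-hash additions above: writer_print_data_hash() and the -show_data_hash handling in main() drive libavutil's generic hash API (av_hash_alloc / av_hash_init / av_hash_update / av_hash_final_hex). Below is a self-contained sketch of that flow, assuming a build where the named algorithm is available; the "SHA256" name and the sample buffer are placeholders.]

/* Standalone sketch of the libavutil hash API used by the hunks above. */
#include <stdio.h>
#include <stdint.h>
#include "libavutil/hash.h"

int main(void)
{
    struct AVHashContext *hash = NULL;
    static const uint8_t data[] = "some packet payload";
    char hex[AV_HASH_MAX_SIZE * 2 + 1];

    if (av_hash_alloc(&hash, "SHA256") < 0) {
        const char *n;
        int i;
        /* unknown name: list the algorithms this build knows about */
        for (i = 0; (n = av_hash_names(i)); i++)
            fprintf(stderr, "%s\n", n);
        return 1;
    }

    av_hash_init(hash);
    av_hash_update(hash, data, sizeof(data) - 1);
    av_hash_final_hex(hash, (uint8_t *)hex, sizeof(hex));
    printf("%s:%s\n", av_hash_get_name(hash), hex);   /* e.g. "SHA256:<digest>" */

    av_hash_freep(&hash);
    return 0;
}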
 
ffserver.c
@@ -38,7 +38,6 @@
#include "libavformat/rtpdec.h"
#include "libavformat/rtpproto.h"
#include "libavformat/rtsp.h"
#include "libavformat/rtspcodes.h"
#include "libavformat/avio_internal.h"
#include "libavformat/internal.h"
#include "libavformat/url.h"
@@ -56,9 +55,7 @@
#include "libavutil/time.h"

#include <stdarg.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#if HAVE_POLL_H
@@ -91,7 +88,7 @@ enum HTTPState {
    RTSPSTATE_SEND_PACKET,
};

static const char * const http_state[] = {
static const char *http_state[] = {
    "HTTP_WAIT_REQUEST",
    "HTTP_SEND_HEADER",

@@ -225,7 +222,7 @@ typedef struct FFStream {
    IPAddressACL *acl;
    char dynamic_acl[1024];
    int nb_streams;
    int prebuffer;      /* Number of milliseconds early to start */
    int prebuffer;      /* Number of millseconds early to start */
    int64_t max_time;      /* Number of milliseconds to run */
    int send_on_key;
    AVStream *streams[MAX_STREAMS];
@@ -558,8 +555,7 @@ static int socket_open_listen(struct sockaddr_in *my_addr)
    }

    tmp = 1;
    if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp)))
        av_log(NULL, AV_LOG_WARNING, "setsockopt SO_REUSEADDR failed\n");
    setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));

    my_addr->sin_family = AF_INET;
    if (bind (server_fd, (struct sockaddr *) my_addr, sizeof (*my_addr)) < 0) {
@@ -575,9 +571,7 @@ static int socket_open_listen(struct sockaddr_in *my_addr)
        closesocket(server_fd);
        return -1;
    }

    if (ff_socket_nonblock(server_fd, 1) < 0)
        av_log(NULL, AV_LOG_WARNING, "ff_socket_nonblock failed\n");
    ff_socket_nonblock(server_fd, 1);

    return server_fd;
}
@@ -592,7 +586,7 @@ static void start_multicast(void)
    int default_port, stream_index;

    default_port = 6000;
    for(stream = first_stream; stream; stream = stream->next) {
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        if (stream->is_multicast) {
            unsigned random0 = av_lfg_get(&random_state);
            unsigned random1 = av_lfg_get(&random_state);
@@ -646,31 +640,25 @@ static int http_server(void)
    struct pollfd *poll_table, *poll_entry;
    HTTPContext *c, *c_next;

    if(!(poll_table = av_mallocz_array(nb_max_http_connections + 2, sizeof(*poll_table)))) {
    if(!(poll_table = av_mallocz((nb_max_http_connections + 2)*sizeof(*poll_table)))) {
        http_log("Impossible to allocate a poll table handling %d connections.\n", nb_max_http_connections);
        return -1;
    }

    if (my_http_addr.sin_port) {
        server_fd = socket_open_listen(&my_http_addr);
        if (server_fd < 0) {
            av_free(poll_table);
        if (server_fd < 0)
            return -1;
        }
    }

    if (my_rtsp_addr.sin_port) {
        rtsp_server_fd = socket_open_listen(&my_rtsp_addr);
        if (rtsp_server_fd < 0) {
            av_free(poll_table);
            closesocket(server_fd);
        if (rtsp_server_fd < 0)
            return -1;
        }
    }

    if (!rtsp_server_fd && !server_fd) {
        http_log("HTTP and RTSP disabled.\n");
        av_free(poll_table);
        return -1;
    }

@@ -696,7 +684,7 @@ static int http_server(void)
        /* wait for events on each HTTP handle */
        c = first_http_ctx;
        delay = 1000;
        while (c) {
        while (c != NULL) {
            int fd;
            fd = c->fd;
            switch(c->state) {
@@ -749,10 +737,8 @@ static int http_server(void)
        do {
            ret = poll(poll_table, poll_entry - poll_table, delay);
            if (ret < 0 && ff_neterrno() != AVERROR(EAGAIN) &&
                ff_neterrno() != AVERROR(EINTR)) {
                av_free(poll_table);
                ff_neterrno() != AVERROR(EINTR))
                return -1;
            }
        } while (ret < 0);

        cur_time = av_gettime() / 1000;
@@ -763,7 +749,7 @@ static int http_server(void)
        }

        /* now handle the events */
        for(c = first_http_ctx; c; c = c_next) {
        for(c = first_http_ctx; c != NULL; c = c_next) {
            c_next = c->next;
            if (handle_connection(c) < 0) {
                log_connection(c);
@@ -815,8 +801,7 @@ static void http_send_too_busy_reply(int fd)
                       "</body></html>\r\n",
                       nb_connections, nb_max_connections);
    av_assert0(len < sizeof(buffer));
    if (send(fd, buffer, len, 0) < len)
        av_log(NULL, AV_LOG_WARNING, "Could not send too-busy reply, send() failed\n");
    send(fd, buffer, len, 0);
}


@@ -834,8 +819,7 @@ static void new_connection(int server_fd, int is_rtsp)
        http_log("error during accept %s\n", strerror(errno));
        return;
    }
    if (ff_socket_nonblock(fd, 1) < 0)
        av_log(NULL, AV_LOG_WARNING, "ff_socket_nonblock failed\n");
    ff_socket_nonblock(fd, 1);

    if (nb_connections >= nb_max_connections) {
        http_send_too_busy_reply(fd);
@@ -881,7 +865,7 @@ static void close_connection(HTTPContext *c)

    /* remove connection from list */
    cp = &first_http_ctx;
    while (*cp) {
    while ((*cp) != NULL) {
        c1 = *cp;
        if (c1 == c)
            *cp = c->next;
@@ -890,7 +874,7 @@ static void close_connection(HTTPContext *c)
    }

    /* remove references, if any (XXX: do it faster) */
    for(c1 = first_http_ctx; c1; c1 = c1->next) {
    for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
        if (c1->rtsp_c == c)
            c1->rtsp_c = NULL;
    }
@@ -1258,13 +1242,24 @@ static int modify_current_stream(HTTPContext *c, char *rates)
    return action_required;
}

/* XXX: factorize in utils.c ? */
/* XXX: take care with different space meaning */
static void skip_spaces(const char **pp)
{
    const char *p;
    p = *pp;
    while (*p == ' ' || *p == '\t')
        p++;
    *pp = p;
}

static void get_word(char *buf, int buf_size, const char **pp)
{
    const char *p;
    char *q;

    p = *pp;
    p += strspn(p, SPACE_CHARS);
    skip_spaces(&p);
    q = buf;
    while (!av_isspace(*p) && *p != '\0') {
        if ((q - buf) < buf_size - 1)
@@ -1329,7 +1324,7 @@ static void parse_acl_row(FFStream *stream, FFStream* feed, IPAddressACL *ext_ac
    get_arg(arg, sizeof(arg), &p);

    if (resolve_host(&acl.first, arg) != 0) {
        fprintf(stderr, "%s:%d: ACL refers to invalid host or IP address '%s'\n",
        fprintf(stderr, "%s:%d: ACL refers to invalid host or ip address '%s'\n",
                filename, line_num, arg);
        errors++;
    } else
@@ -1339,7 +1334,7 @@ static void parse_acl_row(FFStream *stream, FFStream* feed, IPAddressACL *ext_ac

    if (arg[0]) {
        if (resolve_host(&acl.last, arg) != 0) {
            fprintf(stderr, "%s:%d: ACL refers to invalid host or IP address '%s'\n",
            fprintf(stderr, "%s:%d: ACL refers to invalid host or ip address '%s'\n",
                    filename, line_num, arg);
            errors++;
        }
@@ -1369,8 +1364,7 @@ static void parse_acl_row(FFStream *stream, FFStream* feed, IPAddressACL *ext_ac
                naclp = &(*naclp)->next;

            *naclp = nacl;
        } else
            av_free(nacl);
        }
    }
}

@@ -1475,7 +1469,7 @@ static void compute_real_filename(char *filename, int max_size)
    p = strrchr(file1, '.');
    if (p)
        *p = '\0';
    for(stream = first_stream; stream; stream = stream->next) {
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        av_strlcpy(file2, stream->filename, sizeof(file2));
        p = strrchr(file2, '.');
        if (p)
@@ -1584,12 +1578,12 @@ static int http_parse_request(HTTPContext *c)
        av_strlcpy(filename, "index.html", sizeof(filename) - 1);

    stream = first_stream;
    while (stream) {
    while (stream != NULL) {
        if (!strcmp(stream->filename, filename) && validate_acl(stream, c))
            break;
        stream = stream->next;
    }
    if (!stream) {
    if (stream == NULL) {
        snprintf(msg, sizeof(msg), "File '%s' not found", url);
        http_log("File '%s' not found\n", url);
        goto send_error;
@@ -1731,7 +1725,7 @@ static int http_parse_request(HTTPContext *c)
                                *p = '\0';
                            snprintf(q, c->buffer_size,
                                          "HTTP/1.0 200 RTSP Redirect follows\r\n"
                                          /* XXX: incorrect MIME type ? */
                                          /* XXX: incorrect mime type ? */
                                          "Content-type: application/x-rtsp\r\n"
                                          "\r\n"
                                          "rtsp://%s:%d/%s\r\n", hostname, ntohs(my_rtsp_addr.sin_port), filename);
@@ -1752,10 +1746,7 @@ static int http_parse_request(HTTPContext *c)
                            q += strlen(q);

                            len = sizeof(my_addr);

                            /* XXX: Should probably fail? */
                            if (getsockname(c->fd, (struct sockaddr *)&my_addr, &len))
                                http_log("getsockname() failed\n");
                            getsockname(c->fd, (struct sockaddr *)&my_addr, &len);

                            /* XXX: should use a dynamic buffer */
                            sdp_data_size = prepare_sdp_description(stream,
@@ -1795,7 +1786,7 @@ static int http_parse_request(HTTPContext *c)
        /* if post, it means a feed is being sent */
        if (!stream->is_feed) {
            /* However it might be a status report from WMP! Let us log the
             * data as it might come handy one day. */
             * data as it might come in handy one day. */
            const char *logline = 0;
            int client_id = 0;

@@ -1963,7 +1954,7 @@ static void compute_status(HTTPContext *c)
    avio_printf(pb, "<table cellspacing=0 cellpadding=4>\n");
    avio_printf(pb, "<tr><th valign=top>Path<th align=left>Served<br>Conns<th><br>bytes<th valign=top>Format<th>Bit rate<br>kbits/s<th align=left>Video<br>kbits/s<th><br>Codec<th align=left>Audio<br>kbits/s<th><br>Codec<th align=left valign=top>Feed\n");
    stream = first_stream;
    while (stream) {
    while (stream != NULL) {
        char sfilename[1024];
        char *eosf;

@@ -2052,7 +2043,7 @@ static void compute_status(HTTPContext *c)
    avio_printf(pb, "</table>\n");

    stream = first_stream;
    while (stream) {
    while (stream != NULL) {
        if (stream->feed == stream) {
            avio_printf(pb, "<h2>Feed %s</h2>", stream->filename);
            if (stream->pid) {
@@ -2130,7 +2121,7 @@ static void compute_status(HTTPContext *c)
    avio_printf(pb, "<tr><th>#<th>File<th>IP<th>Proto<th>State<th>Target bits/sec<th>Actual bits/sec<th>Bytes transferred\n");
    c1 = first_http_ctx;
    i = 0;
    while (c1) {
    while (c1 != NULL) {
        int bitrate;
        int j;

@@ -2286,7 +2277,7 @@ static int http_prepare_data(HTTPContext *c)
        c->fmt_ctx = *ctx;
        av_freep(&ctx);
        av_dict_copy(&(c->fmt_ctx.metadata), c->stream->metadata, 0);
        c->fmt_ctx.streams = av_mallocz_array(c->stream->nb_streams, sizeof(AVStream *));
        c->fmt_ctx.streams = av_mallocz(sizeof(AVStream *) * c->stream->nb_streams);

        for(i=0;i<c->stream->nb_streams;i++) {
            AVStream *src;
@@ -2317,7 +2308,7 @@ static int http_prepare_data(HTTPContext *c)
        c->fmt_ctx.pb->seekable = 0;

        /*
         * HACK to avoid MPEG-PS muxer to spit many underflow errors
         * HACK to avoid mpeg ps muxer to spit many underflow errors
         * Default value from FFmpeg
         * Try to set it using configuration option
         */
@@ -2748,11 +2739,8 @@ static int http_receive_data(HTTPContext *c)
        /* a packet has been received : write it in the store, except
           if header */
        if (c->data_count > FFM_PACKET_SIZE) {
            /* XXX: use llseek or url_seek
             * XXX: Should probably fail? */
            if (lseek(c->feed_fd, feed->feed_write_index, SEEK_SET) == -1)
                http_log("Seek to %"PRId64" failed\n", feed->feed_write_index);

            /* XXX: use llseek or url_seek */
            lseek(c->feed_fd, feed->feed_write_index, SEEK_SET);
            if (write(c->feed_fd, c->buffer, FFM_PACKET_SIZE) < 0) {
                http_log("Error writing to feed file: %s\n", strerror(errno));
                goto fail;
@@ -2774,7 +2762,7 @@ static int http_receive_data(HTTPContext *c)
            }

            /* wake up any waiting connections */
            for(c1 = first_http_ctx; c1; c1 = c1->next) {
            for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
                if (c1->state == HTTPSTATE_WAIT_FEED &&
                    c1->stream->feed == c->stream->feed)
                    c1->state = HTTPSTATE_SEND_DATA;
@@ -2830,7 +2818,7 @@ static int http_receive_data(HTTPContext *c)
    c->stream->feed_opened = 0;
    close(c->feed_fd);
    /* wake up any waiting connections to stop waiting for feed */
    for(c1 = first_http_ctx; c1; c1 = c1->next) {
    for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
        if (c1->state == HTTPSTATE_WAIT_FEED &&
            c1->stream->feed == c->stream->feed)
            c1->state = HTTPSTATE_SEND_DATA_TRAILER;
@@ -2848,9 +2836,44 @@ static void rtsp_reply_header(HTTPContext *c, enum RTSPStatusCode error_number)
    struct tm *tm;
    char buf2[32];

    str = RTSP_STATUS_CODE2STRING(error_number);
    if (!str)
    switch(error_number) {
    case RTSP_STATUS_OK:
        str = "OK";
        break;
    case RTSP_STATUS_METHOD:
        str = "Method Not Allowed";
        break;
    case RTSP_STATUS_BANDWIDTH:
        str = "Not Enough Bandwidth";
        break;
    case RTSP_STATUS_SESSION:
        str = "Session Not Found";
        break;
    case RTSP_STATUS_STATE:
        str = "Method Not Valid in This State";
        break;
    case RTSP_STATUS_AGGREGATE:
        str = "Aggregate operation not allowed";
        break;
    case RTSP_STATUS_ONLY_AGGREGATE:
        str = "Only aggregate operation allowed";
        break;
    case RTSP_STATUS_TRANSPORT:
        str = "Unsupported transport";
        break;
    case RTSP_STATUS_INTERNAL:
        str = "Internal Server Error";
        break;
    case RTSP_STATUS_SERVICE:
        str = "Service Unavailable";
        break;
    case RTSP_STATUS_VERSION:
        str = "RTSP Version not supported";
        break;
    default:
        str = "Unknown Error";
        break;
    }

    avio_printf(c->pb, "RTSP/1.0 %d %s\r\n", error_number, str);
    avio_printf(c->pb, "CSeq: %d\r\n", c->seq);
@@ -2966,10 +2989,8 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
    AVDictionaryEntry *entry = av_dict_get(stream->metadata, "title", NULL, 0);
    int i;

    *pbuffer = NULL;

    avc =  avformat_alloc_context();
    if (!avc || !rtp_format) {
    if (avc == NULL || !rtp_format) {
        return -1;
    }
    avc->oformat = rtp_format;
@@ -3004,7 +3025,7 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
    av_free(avc);
    av_free(avs);

    return *pbuffer ? strlen(*pbuffer) : AVERROR(ENOMEM);
    return strlen(*pbuffer);
}

static void rtsp_cmd_options(HTTPContext *c, const char *url)
@@ -3026,13 +3047,13 @@ static void rtsp_cmd_describe(HTTPContext *c, const char *url)
    socklen_t len;
    struct sockaddr_in my_addr;

    /* find which URL is asked */
    /* find which url is asked */
    av_url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
    path = path1;
    if (*path == '/')
        path++;

    for(stream = first_stream; stream; stream = stream->next) {
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        if (!stream->is_feed &&
            stream->fmt && !strcmp(stream->fmt->name, "rtp") &&
            !strcmp(path, stream->filename)) {
@@ -3040,11 +3061,11 @@ static void rtsp_cmd_describe(HTTPContext *c, const char *url)
        }
    }
    /* no stream found */
    rtsp_reply_error(c, RTSP_STATUS_NOT_FOUND);
    rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? */
    return;

 found:
    /* prepare the media description in SDP format */
    /* prepare the media description in sdp format */

    /* get the host IP */
    len = sizeof(my_addr);
@@ -3070,7 +3091,7 @@ static HTTPContext *find_rtp_session(const char *session_id)
    if (session_id[0] == '\0')
        return NULL;

    for(c = first_http_ctx; c; c = c->next) {
    for(c = first_http_ctx; c != NULL; c = c->next) {
        if (!strcmp(c->session_id, session_id))
            return c;
    }
@@ -3103,14 +3124,14 @@ static void rtsp_cmd_setup(HTTPContext *c, const char *url,
    struct sockaddr_in dest_addr;
    RTSPActionServerSetup setup;

    /* find which URL is asked */
    /* find which url is asked */
    av_url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
    path = path1;
    if (*path == '/')
        path++;

    /* now check each stream */
    for(stream = first_stream; stream; stream = stream->next) {
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        if (!stream->is_feed &&
            stream->fmt && !strcmp(stream->fmt->name, "rtp")) {
            /* accept aggregate filenames only if single stream */
@@ -3145,7 +3166,7 @@ static void rtsp_cmd_setup(HTTPContext *c, const char *url,
                 random0, random1);
    }

    /* find RTP session, and create it if none found */
    /* find rtp session, and create it if none found */
    rtp_c = find_rtp_session(h->session_id);
    if (!rtp_c) {
        /* always prefer UDP */
@@ -3234,7 +3255,7 @@ static void rtsp_cmd_setup(HTTPContext *c, const char *url,
}


/* find an RTP connection by using the session ID. Check consistency
/* find an rtp connection by using the session ID. Check consistency
   with filename */
static HTTPContext *find_rtp_session_with_url(const char *url,
                                              const char *session_id)
@@ -3249,7 +3270,7 @@ static HTTPContext *find_rtp_session_with_url(const char *url,
    if (!rtp_c)
        return NULL;

    /* find which URL is asked */
    /* find which url is asked */
    av_url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
    path = path1;
    if (*path == '/')
@@ -3416,7 +3437,7 @@ static int rtp_new_av_stream(HTTPContext *c,
    if (!st)
        goto fail;
    ctx->nb_streams = 1;
    ctx->streams = av_mallocz_array(ctx->nb_streams, sizeof(AVStream *));
    ctx->streams = av_mallocz(sizeof(AVStream *) * ctx->nb_streams);
    if (!ctx->streams)
      goto fail;
    ctx->streams[0] = st;
@@ -3480,7 +3501,6 @@ static int rtp_new_av_stream(HTTPContext *c,
    fail:
        if (h)
            ffurl_close(h);
        av_free(st);
        av_free(ctx);
        return -1;
    }
@@ -3571,7 +3591,7 @@ static void remove_stream(FFStream *stream)
{
    FFStream **ps;
    ps = &first_stream;
    while (*ps) {
    while (*ps != NULL) {
        if (*ps == stream)
            *ps = (*ps)->next;
        else
@@ -3579,7 +3599,7 @@ static void remove_stream(FFStream *stream)
    }
}

/* specific MPEG4 handling : we extract the raw parameters */
/* specific mpeg4 handling : we extract the raw parameters */
static void extract_mpeg4_header(AVFormatContext *infile)
{
    int mpeg4_count, i, size;
@@ -3637,7 +3657,7 @@ static void build_file_streams(void)
    int i, ret;

    /* gather all streams */
    for(stream = first_stream; stream; stream = stream_next) {
    for(stream = first_stream; stream != NULL; stream = stream_next) {
        AVFormatContext *infile = NULL;
        stream_next = stream->next;
        if (stream->stream_type == STREAM_TYPE_LIVE &&
@@ -3689,7 +3709,7 @@ static void build_feed_streams(void)
    int i;

    /* gather all streams */
    for(stream = first_stream; stream; stream = stream->next) {
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        feed = stream->feed;
        if (feed) {
            if (stream->is_feed) {
@@ -3704,7 +3724,7 @@ static void build_feed_streams(void)
    }

    /* create feed files if needed */
    for(feed = first_feed; feed; feed = feed->next_feed) {
    for(feed = first_feed; feed != NULL; feed = feed->next_feed) {
        int fd;

        if (avio_check(feed->feed_filename, AVIO_FLAG_READ) > 0) {
@@ -3804,7 +3824,7 @@ static void build_feed_streams(void)
                http_log("Container doesn't support the required parameters\n");
                exit(1);
            }
            /* XXX: need better API */
            /* XXX: need better api */
            av_freep(&s->priv_data);
            avio_close(s->pb);
            s->streams = NULL;
@@ -3836,7 +3856,7 @@ static void compute_bandwidth(void)
    int i;
    FFStream *stream;

    for(stream = first_stream; stream; stream = stream->next) {
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        bandwidth = 0;
        for(i=0;i<stream->nb_streams;i++) {
            AVStream *st = stream->streams[i];
@@ -4070,20 +4090,14 @@ static int parse_ffconfig(const char *filename)

        get_arg(cmd, sizeof(cmd), &p);

        if (!av_strcasecmp(cmd, "Port") || !av_strcasecmp(cmd, "HTTPPort")) {
            if (!av_strcasecmp(cmd, "Port"))
                WARNING("Port option is deprecated, use HTTPPort instead\n");
        if (!av_strcasecmp(cmd, "Port")) {
            get_arg(arg, sizeof(arg), &p);
            val = atoi(arg);
            if (val < 1 || val > 65536) {
                ERROR("Invalid port: %s\n", arg);
                ERROR("Invalid_port: %s\n", arg);
            }
            if (val < 1024)
                WARNING("Trying to use IETF assigned system port: %d\n", val);
            my_http_addr.sin_port = htons(val);
        } else if (!av_strcasecmp(cmd, "HTTPBindAddress") || !av_strcasecmp(cmd, "BindAddress")) {
            if (!av_strcasecmp(cmd, "BindAddress"))
                WARNING("BindAddress option is deprecated, use HTTPBindAddress instead\n");
        } else if (!av_strcasecmp(cmd, "BindAddress")) {
            get_arg(arg, sizeof(arg), &p);
            if (resolve_host(&my_http_addr.sin_addr, arg) != 0) {
                ERROR("%s:%d: Invalid host/IP address: %s\n", arg);
@@ -4289,7 +4303,7 @@ static int parse_ffconfig(const char *filename)
                FFStream *sfeed;

                sfeed = first_feed;
                while (sfeed) {
                while (sfeed != NULL) {
                    if (!strcmp(sfeed->filename, arg))
                        break;
                    sfeed = sfeed->next_feed;
@@ -4307,7 +4321,7 @@ static int parse_ffconfig(const char *filename)
                    stream->fmt = NULL;
                } else {
                    stream->stream_type = STREAM_TYPE_LIVE;
                    /* JPEG cannot be used here, so use single frame MJPEG */
                    /* jpeg cannot be used here, so use single frame jpeg */
                    if (!strcmp(arg, "jpeg"))
                        strcpy(arg, "mjpeg");
                    stream->fmt = ffserver_guess_format(arg, NULL, NULL);
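[Aside on the allocation changes above: several ffserver hunks pair av_mallocz(n * size) with av_mallocz_array(n, size); the array variant checks the n * size product for overflow and returns NULL instead of allocating a too-small block. Below is a minimal sketch of the pattern; the stand-in struct and the connection count are illustrative only.]

/* Sketch of overflow-checked zeroed array allocation with av_mallocz_array(). */
#include <stdio.h>
#include "libavutil/mem.h"

struct pollfd_like { int fd; short events, revents; };   /* stand-in element type */

int main(void)
{
    size_t nb_max_http_connections = 1000;               /* illustrative value */
    struct pollfd_like *poll_table;

    /* same intent as av_mallocz((nb + 2) * sizeof(*poll_table)),
     * but the multiplication is validated before allocating */
    poll_table = av_mallocz_array(nb_max_http_connections + 2, sizeof(*poll_table));
    if (!poll_table) {
        fprintf(stderr, "Impossible to allocate a poll table handling %zu connections.\n",
                nb_max_http_connections);
        return 1;
    }

    av_free(poll_table);
    return 0;
}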
@@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
 | 
			
		||||
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 | 
			
		||||
                                int *got_frame, AVPacket *avpkt)
 | 
			
		||||
{
 | 
			
		||||
    int line, ret;
 | 
			
		||||
    int line = 0, ret;
 | 
			
		||||
    const int width = avctx->width;
 | 
			
		||||
    AVFrame *pic = data;
 | 
			
		||||
    uint16_t *y, *u, *v;
 | 
			
		||||
    const uint8_t *line_end, *src = avpkt->data;
 | 
			
		||||
    int stride = avctx->width * 8 / 3;
 | 
			
		||||
 | 
			
		||||
    if (width <= 1 || avctx->height <= 0) {
 | 
			
		||||
        av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
 | 
			
		||||
    if (width == 1) {
 | 
			
		||||
        av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
 | 
			
		||||
        return AVERROR_INVALIDDATA;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
@@ -67,45 +67,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 | 
			
		||||
    pic->pict_type = AV_PICTURE_TYPE_I;
 | 
			
		||||
    pic->key_frame = 1;
 | 
			
		||||
 | 
			
		||||
    y = (uint16_t *)pic->data[0];
 | 
			
		||||
    u = (uint16_t *)pic->data[1];
 | 
			
		||||
    v = (uint16_t *)pic->data[2];
 | 
			
		||||
    line_end = avpkt->data + stride;
 | 
			
		||||
    for (line = 0; line < avctx->height; line++) {
 | 
			
		||||
        uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
 | 
			
		||||
        uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
 | 
			
		||||
        uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
 | 
			
		||||
        int x;
 | 
			
		||||
        y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
 | 
			
		||||
        u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
 | 
			
		||||
        v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
 | 
			
		||||
 | 
			
		||||
        for (x = 0; x < width; x += 6) {
 | 
			
		||||
            uint32_t t;
 | 
			
		||||
 | 
			
		||||
            if (width - x < 6 || line_end - src < 16) {
 | 
			
		||||
                y = y_temp;
 | 
			
		||||
                u = u_temp;
 | 
			
		||||
                v = v_temp;
 | 
			
		||||
            }
 | 
			
		||||
 | 
			
		||||
            if (line_end - src < 4)
 | 
			
		||||
                break;
 | 
			
		||||
 | 
			
		||||
            t = AV_RL32(src);
    while (line++ < avctx->height) {
        while (1) {
            uint32_t t = AV_RL32(src);
            src += 4;
            *u++ = t <<  6 & 0xFFC0;
            *y++ = t >>  4 & 0xFFC0;
            *v++ = t >> 14 & 0xFFC0;

            if (line_end - src < 4)
            if (src >= line_end - 1) {
                *y = 0x80;
                src++;
                line_end += stride;
                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                break;
            }

            t = AV_RL32(src);
            src += 4;
            *y++ = t <<  6 & 0xFFC0;
            *u++ = t >>  4 & 0xFFC0;
            *y++ = t >> 14 & 0xFFC0;

            if (line_end - src < 4)
            if (src >= line_end - 2) {
                if (!(width & 1)) {
                    *y = 0x80;
                    src += 2;
                }
                line_end += stride;
                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                break;
            }

            t = AV_RL32(src);
            src += 4;
@@ -113,8 +113,15 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
            *y++ = t >>  4 & 0xFFC0;
            *u++ = t >> 14 & 0xFFC0;

            if (line_end - src < 4)
            if (src >= line_end - 1) {
                *y = 0x80;
                src++;
                line_end += stride;
                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                break;
            }

            t = AV_RL32(src);
            src += 4;
@@ -122,21 +129,18 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
            *v++ = t >>  4 & 0xFFC0;
            *y++ = t >> 14 & 0xFFC0;

            if (width - x < 6)
            if (src >= line_end - 2) {
                if (width & 1) {
                    *y = 0x80;
                    src += 2;
                }
                line_end += stride;
                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                break;
            }
        }

        if (x < width) {
            y = x   + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
            u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
            v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
            memcpy(y, y_temp, sizeof(*y) * (width - x));
            memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
            memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
        }

        line_end += stride;
        src = line_end - stride;
    }

    *got_frame = 1;
 
@@ -24,16 +24,13 @@
 * 4XM codec.
 */

#include <inttypes.h>

#include "libavutil/avassert.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "blockdsp.h"
#include "bswapdsp.h"
#include "bytestream.h"
#include "dsputil.h"
#include "get_bits.h"
#include "internal.h"

@@ -134,8 +131,7 @@ typedef struct CFrameBuffer {

typedef struct FourXContext {
    AVCodecContext *avctx;
    BlockDSPContext bdsp;
    BswapDSPContext bbdsp;
    DSPContext dsp;
    uint16_t *frame_buffer;
    uint16_t *last_frame_buffer;
    GetBitContext pre_gb;          ///< ac/dc prefix
@@ -460,8 +456,8 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
                          bitstream_size);
    if (!f->bitstream_buffer)
        return AVERROR(ENOMEM);
    f->bbdsp.bswap_buf(f->bitstream_buffer, (const uint32_t *) (buf + extra),
                       bitstream_size / 4);
    f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra),
                     bitstream_size / 4);
    init_get_bits(&f->gb, f->bitstream_buffer, 8 * bitstream_size);

    wordstream_offset = extra + bitstream_size;
@@ -594,7 +590,7 @@ static int decode_i_mb(FourXContext *f)
    int ret;
    int i;

    f->bdsp.clear_blocks(f->block[0]);
    f->dsp.clear_blocks(f->block[0]);

    for (i = 0; i < 6; i++)
        if ((ret = decode_i_block(f, f->block[i])) < 0)
@@ -799,8 +795,8 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
                          prestream_size);
    if (!f->bitstream_buffer)
        return AVERROR(ENOMEM);
    f->bbdsp.bswap_buf(f->bitstream_buffer, (const uint32_t *) prestream,
                       prestream_size / 4);
    f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream,
                     prestream_size / 4);
    init_get_bits(&f->pre_gb, f->bitstream_buffer, 8 * prestream_size);

    f->last_dc = 0 * 128 * 8 * 8;
@@ -835,7 +831,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
    av_assert0(avctx->width % 16 == 0 && avctx->height % 16 == 0);

    if (buf_size < AV_RL32(buf + 4) + 8) {
        av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %"PRIu32"\n",
        av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n",
               buf_size, AV_RL32(buf + 4));
        return AVERROR_INVALIDDATA;
    }
@@ -1000,8 +996,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
    }

    f->version = AV_RL32(avctx->extradata) >> 16;
    ff_blockdsp_init(&f->bdsp, avctx);
    ff_bswapdsp_init(&f->bbdsp);
    ff_dsputil_init(&f->dsp, avctx);
    f->avctx = avctx;
    init_vlcs(f);

 
@@ -150,7 +150,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
        c->planemap[0] = 0; // 1st plane is palette indexes
        break;
    case 24:
        avctx->pix_fmt = ff_get_format(avctx, pixfmt_rgb24);
        avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
        c->planes      = 3;
        c->planemap[0] = 2; // 1st plane is red
        c->planemap[1] = 1; // 2nd plane is green
 
@@ -101,7 +101,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
        }
        if (avpkt->size < (hdr_size + 1) * avctx->channels) {
            av_log(avctx, AV_LOG_ERROR, "packet size is too small\n");
            return AVERROR_INVALIDDATA;
            return AVERROR(EINVAL);
        }

        esc->fib_acc[0] = avpkt->data[1] + 128;
@@ -124,7 +124,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
    }
    if (!esc->data[0]) {
        av_log(avctx, AV_LOG_ERROR, "unexpected empty packet\n");
        return AVERROR_INVALIDDATA;
        return AVERROR(EINVAL);
    }

    /* decode next piece of data from the buffer */
 
@@ -1,10 +1,10 @@
include $(SUBDIR)../config.mak

NAME = avcodec
FFLIBS = avutil

HEADERS = avcodec.h                                                     \
          avfft.h                                                       \
          dv_profile.h                                                  \
          dxva2.h                                                       \
          old_codec_ids.h                                               \
          vaapi.h                                                       \
@@ -15,13 +15,11 @@ HEADERS = avcodec.h                                                     \

OBJS = allcodecs.o                                                      \
       audioconvert.o                                                   \
       avdct.o                                                          \
       avpacket.o                                                       \
       avpicture.o                                                      \
       bitstream.o                                                      \
       bitstream_filter.o                                               \
       codec_desc.o                                                     \
       dv_profile.o                                                     \
       fmtconvert.o                                                     \
       imgconvert.o                                                     \
       mathtables.o                                                     \
@@ -32,21 +30,18 @@ OBJS = allcodecs.o                                                      \
       resample2.o                                                      \
       utils.o                                                          \

# subsystems
# parts needed for many different codecs
OBJS-$(CONFIG_AANDCTTABLES)            += aandcttab.o
OBJS-$(CONFIG_AC3DSP)                  += ac3dsp.o
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE)       += audio_frame_queue.o
OBJS-$(CONFIG_AUDIODSP)                += audiodsp.o
OBJS-$(CONFIG_BLOCKDSP)                += blockdsp.o
OBJS-$(CONFIG_BSWAPDSP)                += bswapdsp.o
OBJS-$(CONFIG_CABAC)                   += cabac.o
OBJS-$(CONFIG_CRYSTALHD)               += crystalhd.o
OBJS-$(CONFIG_DCT)                     += dct.o dct32_fixed.o dct32_float.o
OBJS-$(CONFIG_DXVA2)                   += dxva2.o
OBJS-$(CONFIG_DSPUTIL)                 += dsputil.o faanidct.o          \
                                          simple_idct.o jrevdct.o
OBJS-$(CONFIG_ENCODERS)                += faandct.o jfdctfst.o jfdctint.o
OBJS-$(CONFIG_ERROR_RESILIENCE)        += error_resilience.o
OBJS-$(CONFIG_EXIF)                    += exif.o tiff_common.o
OBJS-$(CONFIG_FAANDCT)                 += faandct.o
OBJS-$(CONFIG_FAANIDCT)                += faanidct.o
OBJS-$(CONFIG_FDCTDSP)                 += fdctdsp.o jfdctfst.o jfdctint.o
FFT-OBJS-$(CONFIG_HARDCODED_TABLES)    += cos_tables.o cos_fixed_tables.o
OBJS-$(CONFIG_FFT)                     += avfft.o fft_fixed.o fft_float.o \
                                          fft_fixed_32.o fft_init_table.o \
@@ -59,42 +54,30 @@ OBJS-$(CONFIG_H264PRED)                += h264pred.o
OBJS-$(CONFIG_H264QPEL)                += h264qpel.o
OBJS-$(CONFIG_HPELDSP)                 += hpeldsp.o
OBJS-$(CONFIG_HUFFMAN)                 += huffman.o
OBJS-$(CONFIG_HUFFYUVDSP)              += huffyuvdsp.o
OBJS-$(CONFIG_HUFFYUVENCDSP)           += huffyuvencdsp.o
OBJS-$(CONFIG_IDCTDSP)                 += idctdsp.o simple_idct.o jrevdct.o
OBJS-$(CONFIG_IIRFILTER)               += iirfilter.o
OBJS-$(CONFIG_INTRAX8)                 += intrax8.o intrax8dsp.o
OBJS-$(CONFIG_LIBXVID)                 += libxvid_rc.o
OBJS-$(CONFIG_LLAUDDSP)                += lossless_audiodsp.o
OBJS-$(CONFIG_LLVIDDSP)                += lossless_videodsp.o
OBJS-$(CONFIG_LPC)                     += lpc.o
OBJS-$(CONFIG_LSP)                     += lsp.o
OBJS-$(CONFIG_MDCT)                    += mdct_fixed.o mdct_float.o mdct_fixed_32.o
OBJS-$(CONFIG_ME_CMP)                  += me_cmp.o dsputil_compat.o
OBJS-$(CONFIG_MPEG_ER)                 += mpeg_er.o
OBJS-$(CONFIG_MPEGAUDIO)               += mpegaudio.o mpegaudiodata.o   \
                                          mpegaudiodecheader.o
OBJS-$(CONFIG_MPEGAUDIODSP)            += mpegaudiodsp.o                \
                                          mpegaudiodsp_data.o           \
                                          mpegaudiodsp_fixed.o          \
                                          mpegaudiodsp_float.o
OBJS-$(CONFIG_MPEGVIDEO)               += mpegvideo.o mpegvideodsp.o    \
                                          mpegvideo_motion.o mpegutils.o
OBJS-$(CONFIG_MPEGVIDEO)               += mpegvideo.o mpegvideo_motion.o
OBJS-$(CONFIG_MPEGVIDEOENC)            += mpegvideo_enc.o mpeg12data.o  \
                                          motion_est.o ratecontrol.o    \
                                          mpegvideoencdsp.o
OBJS-$(CONFIG_PIXBLOCKDSP)             += pixblockdsp.o
OBJS-$(CONFIG_QPELDSP)                 += qpeldsp.o
                                          motion_est.o ratecontrol.o
OBJS-$(CONFIG_RANGECODER)              += rangecoder.o
RDFT-OBJS-$(CONFIG_HARDCODED_TABLES)   += sin_tables.o
OBJS-$(CONFIG_RDFT)                    += rdft.o $(RDFT-OBJS-yes)
OBJS-$(CONFIG_SHARED)                  += log2_tab.o
OBJS-$(CONFIG_SINEWIN)                 += sinewin.o
OBJS-$(CONFIG_STARTCODE)               += startcode.o
OBJS-$(CONFIG_TPELDSP)                 += tpeldsp.o
OBJS-$(CONFIG_VAAPI)                   += vaapi.o
OBJS-$(CONFIG_VDPAU)                   += vdpau.o
OBJS-$(CONFIG_VIDEODSP)                += videodsp.o
OBJS-$(CONFIG_VP3DSP)                  += vp3dsp.o
OBJS-$(CONFIG_WMA_FREQS)               += wma_freqs.o

# decoders/encoders
OBJS-$(CONFIG_ZERO12V_DECODER)         += 012v.o
@@ -105,18 +88,16 @@ OBJS-$(CONFIG_AAC_DECODER)             += aacdec.o aactab.o aacsbr.o aacps.o \
                                          sbrdsp.o aacpsdsp.o
OBJS-$(CONFIG_AAC_ENCODER)             += aacenc.o aaccoder.o    \
                                          aacpsy.o aactab.o      \
                                          psymodel.o mpeg4audio.o kbdwin.o
                                          psymodel.o iirfilter.o \
                                          mpeg4audio.o kbdwin.o
OBJS-$(CONFIG_AASC_DECODER)            += aasc.o msrledec.o
OBJS-$(CONFIG_AC3_DECODER)             += ac3dec_float.o ac3dec_data.o ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_FIXED_DECODER)       += ac3dec_fixed.o ac3dec_data.o ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_DECODER)             += ac3dec.o ac3dec_data.o ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_ENCODER)             += ac3enc_float.o ac3enc.o ac3tab.o \
                                          ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_FIXED_ENCODER)       += ac3enc_fixed.o ac3enc.o ac3tab.o ac3.o
OBJS-$(CONFIG_AIC_DECODER)             += aic.o
OBJS-$(CONFIG_ALAC_DECODER)            += alac.o alac_data.o
OBJS-$(CONFIG_ALAC_ENCODER)            += alacenc.o alac_data.o
OBJS-$(CONFIG_ALIAS_PIX_DECODER)       += aliaspixdec.o
OBJS-$(CONFIG_ALIAS_PIX_ENCODER)       += aliaspixenc.o
OBJS-$(CONFIG_ALS_DECODER)             += alsdec.o bgmc.o mpeg4audio.o
OBJS-$(CONFIG_AMRNB_DECODER)           += amrnbdec.o celp_filters.o   \
                                          celp_math.o acelp_filters.o \
@@ -126,7 +107,8 @@ OBJS-$(CONFIG_AMRWB_DECODER)           += amrwbdec.o celp_filters.o   \
                                          celp_math.o acelp_filters.o \
                                          acelp_vectors.o             \
                                          acelp_pitch_delay.o
OBJS-$(CONFIG_AMV_ENCODER)             += mjpegenc.o mjpeg.o mjpegenc_common.o \
OBJS-$(CONFIG_AMV_DECODER)             += sp5xdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_AMV_ENCODER)             += mjpegenc.o mjpeg.o           \
                                          mpegvideo_enc.o motion_est.o \
                                          ratecontrol.o mpeg12data.o   \
                                          mpegvideo.o
@@ -158,14 +140,14 @@ OBJS-$(CONFIG_AYUV_ENCODER)            += v408enc.o
OBJS-$(CONFIG_BETHSOFTVID_DECODER)     += bethsoftvideo.o
OBJS-$(CONFIG_BFI_DECODER)             += bfi.o
OBJS-$(CONFIG_BINK_DECODER)            += bink.o binkdsp.o
OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER)   += binkaudio.o
OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER)  += binkaudio.o
OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER)   += binkaudio.o wma.o wma_common.o
OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER)  += binkaudio.o wma.o wma_common.o
OBJS-$(CONFIG_BINTEXT_DECODER)         += bintext.o cga_data.o
OBJS-$(CONFIG_BMP_DECODER)             += bmp.o msrledec.o
OBJS-$(CONFIG_BMP_ENCODER)             += bmpenc.o
OBJS-$(CONFIG_BMV_AUDIO_DECODER)       += bmvaudio.o
OBJS-$(CONFIG_BMV_VIDEO_DECODER)       += bmvvideo.o
OBJS-$(CONFIG_BRENDER_PIX_DECODER)     += brenderpix.o
OBJS-$(CONFIG_BMV_VIDEO_DECODER)       += bmv.o
OBJS-$(CONFIG_BMV_AUDIO_DECODER)       += bmv.o
OBJS-$(CONFIG_BRENDER_PIX_DECODER)     += brender_pix.o
OBJS-$(CONFIG_C93_DECODER)             += c93.o
OBJS-$(CONFIG_CAVS_DECODER)            += cavs.o cavsdec.o cavsdsp.o \
                                          cavsdata.o mpeg12data.o
@@ -173,8 +155,8 @@ OBJS-$(CONFIG_CDGRAPHICS_DECODER)      += cdgraphics.o
OBJS-$(CONFIG_CDXL_DECODER)            += cdxl.o
OBJS-$(CONFIG_CINEPAK_DECODER)         += cinepak.o
OBJS-$(CONFIG_CINEPAK_ENCODER)         += cinepakenc.o elbg.o
OBJS-$(CONFIG_CLJR_DECODER)            += cljrdec.o
OBJS-$(CONFIG_CLJR_ENCODER)            += cljrenc.o
OBJS-$(CONFIG_CLJR_DECODER)            += cljr.o
OBJS-$(CONFIG_CLJR_ENCODER)            += cljr.o
OBJS-$(CONFIG_CLLC_DECODER)            += cllc.o
OBJS-$(CONFIG_COOK_DECODER)            += cook.o
OBJS-$(CONFIG_COMFORTNOISE_DECODER)    += cngdec.o celp_filters.o
@@ -192,28 +174,25 @@ OBJS-$(CONFIG_DNXHD_DECODER)           += dnxhddec.o dnxhddata.o
OBJS-$(CONFIG_DNXHD_ENCODER)           += dnxhdenc.o dnxhddata.o
OBJS-$(CONFIG_DPX_DECODER)             += dpx.o
OBJS-$(CONFIG_DPX_ENCODER)             += dpxenc.o
OBJS-$(CONFIG_DSD_LSBF_DECODER)        += dsddec.o
OBJS-$(CONFIG_DSD_MSBF_DECODER)        += dsddec.o
OBJS-$(CONFIG_DSD_LSBF_PLANAR_DECODER) += dsddec.o
OBJS-$(CONFIG_DSD_MSBF_PLANAR_DECODER) += dsddec.o
OBJS-$(CONFIG_DSICINAUDIO_DECODER)     += dsicinaudio.o
OBJS-$(CONFIG_DSICINVIDEO_DECODER)     += dsicinvideo.o
OBJS-$(CONFIG_DSICINAUDIO_DECODER)     += dsicinav.o
OBJS-$(CONFIG_DSICINVIDEO_DECODER)     += dsicinav.o
OBJS-$(CONFIG_DVBSUB_DECODER)          += dvbsubdec.o
OBJS-$(CONFIG_DVBSUB_ENCODER)          += dvbsub.o
OBJS-$(CONFIG_DVDSUB_DECODER)          += dvdsubdec.o
OBJS-$(CONFIG_DVDSUB_ENCODER)          += dvdsubenc.o
OBJS-$(CONFIG_DVVIDEO_DECODER)         += dvdec.o dv.o dvdata.o
OBJS-$(CONFIG_DVVIDEO_ENCODER)         += dvenc.o dv.o dvdata.o
OBJS-$(CONFIG_DVVIDEO_DECODER)         += dvdec.o dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DVVIDEO_ENCODER)         += dvenc.o dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DXA_DECODER)             += dxa.o
OBJS-$(CONFIG_DXTORY_DECODER)          += dxtory.o
OBJS-$(CONFIG_EAC3_DECODER)            += eac3_data.o
OBJS-$(CONFIG_EAC3_DECODER)            += eac3dec.o eac3_data.o
OBJS-$(CONFIG_EAC3_ENCODER)            += eac3enc.o eac3_data.o
OBJS-$(CONFIG_EACMV_DECODER)           += eacmv.o
OBJS-$(CONFIG_EAMAD_DECODER)           += eamad.o eaidct.o mpeg12.o \
                                          mpeg12data.o
OBJS-$(CONFIG_EATGQ_DECODER)           += eatgq.o eaidct.o
OBJS-$(CONFIG_EATGV_DECODER)           += eatgv.o
OBJS-$(CONFIG_EATQI_DECODER)           += eatqi.o eaidct.o
OBJS-$(CONFIG_EATQI_DECODER)           += eatqi.o eaidct.o mpeg12dec.o  \
                                          mpeg12.o mpeg12data.o
OBJS-$(CONFIG_EIGHTBPS_DECODER)        += 8bps.o
OBJS-$(CONFIG_EIGHTSVX_EXP_DECODER)    += 8svx.o
OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER)    += 8svx.o
@@ -223,6 +202,8 @@ OBJS-$(CONFIG_EVRC_DECODER)            += evrcdec.o acelp_vectors.o lsp.o
OBJS-$(CONFIG_EXR_DECODER)             += exr.o
OBJS-$(CONFIG_FFV1_DECODER)            += ffv1dec.o ffv1.o
OBJS-$(CONFIG_FFV1_ENCODER)            += ffv1enc.o ffv1.o
OBJS-$(CONFIG_FFVHUFF_DECODER)         += huffyuv.o huffyuvdec.o
OBJS-$(CONFIG_FFVHUFF_ENCODER)         += huffyuv.o huffyuvenc.o
OBJS-$(CONFIG_FFWAVESYNTH_DECODER)     += ffwavesynth.o
OBJS-$(CONFIG_FIC_DECODER)             += fic.o
OBJS-$(CONFIG_FLAC_DECODER)            += flacdec.o flacdata.o flac.o flacdsp.o
@@ -251,17 +232,18 @@ OBJS-$(CONFIG_H263_DECODER)            += h263dec.o h263.o ituh263dec.o        \
                                          intelh263dec.o
OBJS-$(CONFIG_H263_ENCODER)            += mpeg4videoenc.o mpeg4video.o  \
                                          h263.o ituh263enc.o flvenc.o
OBJS-$(CONFIG_H264_DECODER)            += h264.o h264_cabac.o h264_cavlc.o \
                                          h264_direct.o h264_loopfilter.o  \
                                          h264_mb.o h264_picture.o h264_ps.o \
                                          h264_refs.o h264_sei.o h264_slice.o
OBJS-$(CONFIG_H264_DECODER)            += h264.o                               \
                                          h264_loopfilter.o h264_direct.o      \
                                          cabac.o h264_sei.o h264_ps.o         \
                                          h264_refs.o h264_cavlc.o h264_cabac.o
OBJS-$(CONFIG_H264_VDA_DECODER)        += vda_h264_dec.o
OBJS-$(CONFIG_HEVC_DECODER)            += hevc.o hevc_mvs.o hevc_ps.o hevc_sei.o \
                                          hevc_cabac.o hevc_refs.o hevcpred.o    \
                                          hevcdsp.o hevc_filter.o
                                          hevcdsp.o hevc_filter.o cabac.o
OBJS-$(CONFIG_HNM4_VIDEO_DECODER)      += hnm4video.o
OBJS-$(CONFIG_HUFFYUV_DECODER)         += huffyuv.o huffyuvdec.o
OBJS-$(CONFIG_HUFFYUV_ENCODER)         += huffyuv.o huffyuvenc.o
OBJS-$(CONFIG_IAC_DECODER)             += imc.o
OBJS-$(CONFIG_IDCIN_DECODER)           += idcinvideo.o
OBJS-$(CONFIG_IDF_DECODER)             += bintext.o cga_data.o
OBJS-$(CONFIG_IFF_BYTERUN1_DECODER)    += iff.o
@@ -278,13 +260,14 @@ OBJS-$(CONFIG_JPEG2000_ENCODER)        += j2kenc.o mqcenc.o mqc.o jpeg2000.o \
                                          jpeg2000dwt.o
OBJS-$(CONFIG_JPEG2000_DECODER)        += jpeg2000dec.o jpeg2000.o      \
                                          jpeg2000dwt.o mqcdec.o mqc.o
OBJS-$(CONFIG_JPEGLS_DECODER)          += jpeglsdec.o jpegls.o
OBJS-$(CONFIG_JPEGLS_DECODER)          += jpeglsdec.o jpegls.o \
                                          mjpegdec.o mjpeg.o
OBJS-$(CONFIG_JPEGLS_ENCODER)          += jpeglsenc.o jpegls.o
OBJS-$(CONFIG_JV_DECODER)              += jvdec.o
OBJS-$(CONFIG_KGV1_DECODER)            += kgv1dec.o
OBJS-$(CONFIG_KMVC_DECODER)            += kmvc.o
OBJS-$(CONFIG_LAGARITH_DECODER)        += lagarith.o lagarithrac.o
OBJS-$(CONFIG_LJPEG_ENCODER)           += ljpegenc.o mjpeg.o mjpegenc_common.o
OBJS-$(CONFIG_LJPEG_ENCODER)           += ljpegenc.o mjpegenc.o mjpeg.o
OBJS-$(CONFIG_LOCO_DECODER)            += loco.o
OBJS-$(CONFIG_MACE3_DECODER)           += mace.o
OBJS-$(CONFIG_MACE6_DECODER)           += mace.o
@@ -294,8 +277,8 @@ OBJS-$(CONFIG_METASOUND_DECODER)       += metasound.o metasound_data.o \
OBJS-$(CONFIG_MICRODVD_DECODER)        += microdvddec.o ass.o
OBJS-$(CONFIG_MIMIC_DECODER)           += mimic.o
OBJS-$(CONFIG_MJPEG_DECODER)           += mjpegdec.o mjpeg.o
OBJS-$(CONFIG_MJPEG_ENCODER)           += mjpegenc.o mjpeg.o mjpegenc_common.o
OBJS-$(CONFIG_MJPEGB_DECODER)          += mjpegbdec.o
OBJS-$(CONFIG_MJPEG_ENCODER)           += mjpegenc.o mjpeg.o
OBJS-$(CONFIG_MJPEGB_DECODER)          += mjpegbdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_MLP_DECODER)             += mlpdec.o mlpdsp.o
OBJS-$(CONFIG_MMVIDEO_DECODER)         += mmvideo.o
OBJS-$(CONFIG_MOTIONPIXELS_DECODER)    += motionpixels.o
@@ -317,12 +300,12 @@ OBJS-$(CONFIG_MP3ON4_DECODER)          += mpegaudiodec_fixed.o mpeg4audio.o
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER)     += mpegaudiodec_float.o mpeg4audio.o
OBJS-$(CONFIG_MPC7_DECODER)            += mpc7.o mpc.o
OBJS-$(CONFIG_MPC8_DECODER)            += mpc8.o mpc.o
OBJS-$(CONFIG_MPEGVIDEO_DECODER)       += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEGVIDEO_DECODER)       += mpeg12.o mpeg12data.o \
                                          mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MPEG1VIDEO_DECODER)      += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG1VIDEO_ENCODER)      += mpeg12enc.o mpeg12.o
OBJS-$(CONFIG_MPEG2VIDEO_DECODER)      += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER)      += mpeg12enc.o mpeg12.o
OBJS-$(CONFIG_MPEG4_DECODER)           += xvididct.o
OBJS-$(CONFIG_MPL2_DECODER)            += mpl2dec.o ass.o
OBJS-$(CONFIG_MSMPEG4V1_DECODER)       += msmpeg4dec.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V2_DECODER)       += msmpeg4dec.o msmpeg4.o msmpeg4data.o
@@ -339,16 +322,12 @@ OBJS-$(CONFIG_MSZH_DECODER)            += lcldec.o
OBJS-$(CONFIG_MTS2_DECODER)            += mss4.o mss34dsp.o
OBJS-$(CONFIG_MVC1_DECODER)            += mvcdec.o
OBJS-$(CONFIG_MVC2_DECODER)            += mvcdec.o
OBJS-$(CONFIG_MXPEG_DECODER)           += mxpegdec.o
OBJS-$(CONFIG_MXPEG_DECODER)           += mxpegdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_NELLYMOSER_DECODER)      += nellymoserdec.o nellymoser.o
OBJS-$(CONFIG_NELLYMOSER_ENCODER)      += nellymoserenc.o nellymoser.o
OBJS-$(CONFIG_NUV_DECODER)             += nuv.o rtjpeg.o
OBJS-$(CONFIG_ON2AVC_DECODER)          += on2avc.o on2avcdata.o
OBJS-$(CONFIG_OPUS_DECODER)            += opusdec.o opus.o opus_celt.o \
                                          opus_imdct.o opus_silk.o     \
                                          vorbis_data.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER)       += pafaudio.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER)       += pafvideo.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER)       += paf.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER)       += paf.o
OBJS-$(CONFIG_PAM_DECODER)             += pnmdec.o pnm.o
OBJS-$(CONFIG_PAM_ENCODER)             += pamenc.o
OBJS-$(CONFIG_PBM_DECODER)             += pnmdec.o pnm.o
@@ -426,7 +405,7 @@ OBJS-$(CONFIG_SOL_DPCM_DECODER)        += dpcm.o
OBJS-$(CONFIG_SONIC_DECODER)           += sonic.o
OBJS-$(CONFIG_SONIC_ENCODER)           += sonic.o
OBJS-$(CONFIG_SONIC_LS_ENCODER)        += sonic.o
OBJS-$(CONFIG_SP5X_DECODER)            += sp5xdec.o
OBJS-$(CONFIG_SP5X_DECODER)            += sp5xdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_SRT_DECODER)             += srtdec.o ass.o
OBJS-$(CONFIG_SRT_ENCODER)             += srtenc.o ass_split.o
OBJS-$(CONFIG_SUBRIP_DECODER)          += srtdec.o ass.o
@@ -438,13 +417,17 @@ OBJS-$(CONFIG_SUNRAST_ENCODER)         += sunrastenc.o
OBJS-$(CONFIG_SVQ1_DECODER)            += svq1dec.o svq1.o svq13.o h263.o
OBJS-$(CONFIG_SVQ1_ENCODER)            += svq1enc.o svq1.o    \
                                          h263.o ituh263enc.o
OBJS-$(CONFIG_SVQ3_DECODER)            += svq3.o svq13.o mpegutils.o
OBJS-$(CONFIG_SVQ3_DECODER)            += svq3.o svq13.o h263.o h264.o        \
                                          h264_loopfilter.o h264_direct.o     \
                                          h264_sei.o h264_ps.o h264_refs.o    \
                                          h264_cavlc.o h264_cabac.o cabac.o
OBJS-$(CONFIG_TEXT_DECODER)            += textdec.o ass.o
OBJS-$(CONFIG_TAK_DECODER)             += takdec.o tak.o
OBJS-$(CONFIG_TARGA_DECODER)           += targa.o
OBJS-$(CONFIG_TARGA_ENCODER)           += targaenc.o rle.o
OBJS-$(CONFIG_TARGA_Y216_DECODER)      += targa_y216dec.o
OBJS-$(CONFIG_THEORA_DECODER)          += xiph.o
OBJS-$(CONFIG_THP_DECODER)             += mjpegdec.o mjpeg.o
OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
OBJS-$(CONFIG_TIFF_DECODER)            += tiff.o lzw.o faxcompr.o tiff_data.o tiff_common.o
OBJS-$(CONFIG_TIFF_ENCODER)            += tiffenc.o rle.o lzwenc.o tiff_data.o
@@ -477,8 +460,8 @@ OBJS-$(CONFIG_VC1_DECODER)             += vc1dec.o vc1.o vc1data.o vc1dsp.o \
                                          msmpeg4dec.o msmpeg4.o msmpeg4data.o \
                                          wmv2dsp.o
OBJS-$(CONFIG_VCR1_DECODER)            += vcr1.o
OBJS-$(CONFIG_VMDAUDIO_DECODER)        += vmdaudio.o
OBJS-$(CONFIG_VMDVIDEO_DECODER)        += vmdvideo.o
OBJS-$(CONFIG_VMDAUDIO_DECODER)        += vmdav.o
OBJS-$(CONFIG_VMDVIDEO_DECODER)        += vmdav.o
OBJS-$(CONFIG_VMNC_DECODER)            += vmnc.o
OBJS-$(CONFIG_VORBIS_DECODER)          += vorbisdec.o vorbisdsp.o vorbis.o \
                                          vorbis_data.o xiph.o
@@ -489,7 +472,6 @@ OBJS-$(CONFIG_VP5_DECODER)             += vp5.o vp56.o vp56data.o vp56dsp.o \
                                          vp56rac.o
OBJS-$(CONFIG_VP6_DECODER)             += vp6.o vp56.o vp56data.o vp56dsp.o \
                                          vp6dsp.o vp56rac.o
OBJS-$(CONFIG_VP7_DECODER)             += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_VP8_DECODER)             += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_VP9_DECODER)             += vp9.o vp9dsp.o vp56rac.o
OBJS-$(CONFIG_VPLAYER_DECODER)         += textdec.o ass.o
@@ -498,8 +480,7 @@ OBJS-$(CONFIG_WAVPACK_DECODER)         += wavpack.o
OBJS-$(CONFIG_WAVPACK_ENCODER)         += wavpackenc.o
OBJS-$(CONFIG_WEBP_DECODER)            += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_WEBP_DECODER)            += webp.o exif.o tiff_common.o
OBJS-$(CONFIG_WEBVTT_DECODER)          += webvttdec.o ass.o
OBJS-$(CONFIG_WEBVTT_ENCODER)          += webvttenc.o ass_split.o
OBJS-$(CONFIG_WEBVTT_DECODER)          += webvttdec.o
OBJS-$(CONFIG_WMALOSSLESS_DECODER)     += wmalosslessdec.o wma_common.o
OBJS-$(CONFIG_WMAPRO_DECODER)          += wmaprodec.o wma.o wma_common.o
OBJS-$(CONFIG_WMAV1_DECODER)           += wmadec.o wma.o wma_common.o aactab.o
@@ -510,7 +491,6 @@ OBJS-$(CONFIG_WMAVOICE_DECODER)        += wmavoice.o \
                                          celp_filters.o \
                                          acelp_vectors.o acelp_filters.o
OBJS-$(CONFIG_WMV1_DECODER)            += msmpeg4dec.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_WMV1_ENCODER)            += msmpeg4enc.o
OBJS-$(CONFIG_WMV2_DECODER)            += wmv2dec.o wmv2.o wmv2dsp.o \
                                          msmpeg4dec.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_WMV2_ENCODER)            += wmv2enc.o wmv2.o wmv2dsp.o \
@@ -639,19 +619,13 @@ OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER)      += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SWF_DECODER)          += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SWF_ENCODER)          += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_THP_DECODER)          += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_VIMA_DECODER)         += vima.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_XA_DECODER)           += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER)       += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER)       += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_VIMA_DECODER)               += vima.o adpcm_data.o

# hardware accelerators
OBJS-$(CONFIG_DXVA2)                      += dxva2.o
OBJS-$(CONFIG_VAAPI)                      += vaapi.o
OBJS-$(CONFIG_VDA)                        += vda.o
OBJS-$(CONFIG_VDPAU)                      += vdpau.o

OBJS-$(CONFIG_H263_VAAPI_HWACCEL)         += vaapi_mpeg4.o
OBJS-$(CONFIG_H263_VAAPI_HWACCEL)         += vaapi_mpeg4.o vaapi_mpeg.o
OBJS-$(CONFIG_H263_VDPAU_HWACCEL)         += vdpau_mpeg4.o
OBJS-$(CONFIG_H264_DXVA2_HWACCEL)         += dxva2_h264.o
OBJS-$(CONFIG_H264_VAAPI_HWACCEL)         += vaapi_h264.o
@@ -660,19 +634,22 @@ OBJS-$(CONFIG_H264_VDPAU_HWACCEL)         += vdpau_h264.o
OBJS-$(CONFIG_MPEG1_VDPAU_HWACCEL)        += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG1_XVMC_HWACCEL)         += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL)        += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL)        += vaapi_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL)        += vaapi_mpeg2.o vaapi_mpeg.o
OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL)        += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG2_XVMC_HWACCEL)         += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL)        += vaapi_mpeg4.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL)        += vaapi_mpeg4.o vaapi_mpeg.o
OBJS-$(CONFIG_MPEG4_VDPAU_HWACCEL)        += vdpau_mpeg4.o
OBJS-$(CONFIG_VC1_DXVA2_HWACCEL)          += dxva2_vc1.o
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL)          += vaapi_vc1.o
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL)          += vaapi_vc1.o vaapi_mpeg.o
OBJS-$(CONFIG_VC1_VDPAU_HWACCEL)          += vdpau_vc1.o

# libavformat dependencies
OBJS-$(CONFIG_ADTS_MUXER)              += mpeg4audio.o
OBJS-$(CONFIG_ADX_DEMUXER)             += adx.o
OBJS-$(CONFIG_CAF_DEMUXER)             += mpeg4audio.o mpegaudiodata.o  \
                                          ac3tab.o
OBJS-$(CONFIG_DV_DEMUXER)              += dv_profile.o
OBJS-$(CONFIG_DV_MUXER)                += dv_profile.o
OBJS-$(CONFIG_FLAC_DEMUXER)            += flac.o flacdata.o vorbis_data.o \
                                          vorbis_parser.o xiph.o
OBJS-$(CONFIG_FLAC_MUXER)              += flac.o flacdata.o vorbis_data.o
@@ -683,8 +660,7 @@ OBJS-$(CONFIG_ISMV_MUXER)              += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_LATM_MUXER)              += mpeg4audio.o
OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER)    += xiph.o mpeg4audio.o vorbis_data.o \
                                          flac.o flacdata.o
OBJS-$(CONFIG_MATROSKA_DEMUXER)        += mpeg4audio.o mpegaudiodata.o  \
                                          vorbis_parser.o xiph.o
OBJS-$(CONFIG_MATROSKA_DEMUXER)        += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MATROSKA_MUXER)          += mpeg4audio.o mpegaudiodata.o  \
                                          flac.o flacdata.o vorbis_data.o xiph.o
OBJS-$(CONFIG_MP2_MUXER)               += mpegaudiodata.o mpegaudiodecheader.o
@@ -695,7 +671,6 @@ OBJS-$(CONFIG_MPEGTS_MUXER)            += mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_DEMUXER)          += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MXF_MUXER)               += dnxhddata.o
OBJS-$(CONFIG_NUT_MUXER)               += mpegaudiodata.o
OBJS-$(CONFIG_OGA_MUXER)               += xiph.o flac.o flacdata.o
OBJS-$(CONFIG_OGG_DEMUXER)             += xiph.o flac.o flacdata.o     \
                                          mpeg12data.o vorbis_parser.o \
                                          dirac.o vorbis_data.o
@@ -709,7 +684,6 @@ OBJS-$(CONFIG_TAK_DEMUXER)             += tak.o
OBJS-$(CONFIG_WEBM_MUXER)              += mpeg4audio.o mpegaudiodata.o  \
                                          xiph.o flac.o flacdata.o \
                                          vorbis_data.o
OBJS-$(CONFIG_WEBM_DASH_MANIFEST_DEMUXER) += vorbis_parser.o xiph.o
OBJS-$(CONFIG_WTV_DEMUXER)             += mpeg4audio.o mpegaudiodata.o

# libavfilter dependencies
@@ -721,10 +695,10 @@ OBJS-$(CONFIG_LIBCELT_DECODER)            += libcelt_dec.o
OBJS-$(CONFIG_LIBFAAC_ENCODER)            += libfaac.o
OBJS-$(CONFIG_LIBFDK_AAC_DECODER)         += libfdk-aacdec.o
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER)         += libfdk-aacenc.o
OBJS-$(CONFIG_LIBGSM_DECODER)             += libgsmdec.o
OBJS-$(CONFIG_LIBGSM_ENCODER)             += libgsmenc.o
OBJS-$(CONFIG_LIBGSM_MS_DECODER)          += libgsmdec.o
OBJS-$(CONFIG_LIBGSM_MS_ENCODER)          += libgsmenc.o
OBJS-$(CONFIG_LIBGSM_DECODER)             += libgsm.o
OBJS-$(CONFIG_LIBGSM_ENCODER)             += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_DECODER)          += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_ENCODER)          += libgsm.o
OBJS-$(CONFIG_LIBILBC_DECODER)            += libilbc.o
OBJS-$(CONFIG_LIBILBC_ENCODER)            += libilbc.o
OBJS-$(CONFIG_LIBMP3LAME_ENCODER)         += libmp3lame.o mpegaudiodecheader.o
@@ -788,7 +762,11 @@ OBJS-$(CONFIG_FLAC_PARSER)             += flac_parser.o flacdata.o flac.o \
OBJS-$(CONFIG_GSM_PARSER)              += gsm_parser.o
OBJS-$(CONFIG_H261_PARSER)             += h261_parser.o
OBJS-$(CONFIG_H263_PARSER)             += h263_parser.o
OBJS-$(CONFIG_H264_PARSER)             += h264_parser.o
OBJS-$(CONFIG_H264_PARSER)             += h264_parser.o h264.o            \
                                          cabac.o                         \
                                          h264_refs.o h264_sei.o h264_direct.o \
                                          h264_loopfilter.o h264_cabac.o \
                                          h264_cavlc.o h264_ps.o
OBJS-$(CONFIG_HEVC_PARSER)             += hevc_parser.o
OBJS-$(CONFIG_MJPEG_PARSER)            += mjpeg_parser.o
OBJS-$(CONFIG_MLP_PARSER)              += mlp_parser.o mlp.o
@@ -800,13 +778,12 @@ OBJS-$(CONFIG_MPEGAUDIO_PARSER)        += mpegaudio_parser.o \
                                          mpegaudiodecheader.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGVIDEO_PARSER)        += mpegvideo_parser.o    \
                                          mpeg12.o mpeg12data.o
OBJS-$(CONFIG_OPUS_PARSER)             += opus_parser.o opus.o vorbis_data.o
OBJS-$(CONFIG_PNG_PARSER)              += png_parser.o
OBJS-$(CONFIG_PNM_PARSER)              += pnm_parser.o pnm.o
OBJS-$(CONFIG_RV30_PARSER)             += rv34_parser.o
OBJS-$(CONFIG_RV40_PARSER)             += rv34_parser.o
OBJS-$(CONFIG_TAK_PARSER)              += tak_parser.o tak.o
OBJS-$(CONFIG_VC1_PARSER)              += vc1_parser.o vc1.o vc1data.o vc1dsp.o \
OBJS-$(CONFIG_VC1_PARSER)              += vc1_parser.o vc1.o vc1data.o \
                                          msmpeg4.o msmpeg4data.o mpeg4video.o \
                                          h263.o
OBJS-$(CONFIG_VORBIS_PARSER)           += vorbis_parser.o xiph.o
@@ -843,7 +820,6 @@ SKIPHEADERS                            += %_tablegen.h                  \
                                          %_tables.h                    \
                                          aac_tablegen_decl.h           \
                                          fft-internal.h                \
                                          libutvideo.h                  \
                                          old_codec_ids.h               \
                                          tableprint.h                  \
                                          $(ARCH)/vp56_arith.h          \
@@ -853,31 +829,29 @@ SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER)  += libschroedinger.h
SKIPHEADERS-$(CONFIG_LIBUTVIDEO)       += libutvideo.h
SKIPHEADERS-$(CONFIG_XVMC)             += xvmc.h
SKIPHEADERS-$(CONFIG_VAAPI)            += vaapi_internal.h
SKIPHEADERS-$(CONFIG_VDA)              += vda.h vda_internal.h
SKIPHEADERS-$(CONFIG_VDA)              += vda.h
SKIPHEADERS-$(CONFIG_VDPAU)            += vdpau.h vdpau_internal.h

TESTPROGS = imgconvert                                                  \

TESTPROGS-$(CONFIG_CABAC)                 += cabac
TESTPROGS-$(CONFIG_FFT)                   += fft fft-fixed fft-fixed32
TESTPROGS-$(CONFIG_IDCTDSP)               += dct
TESTPROGS-$(CONFIG_IIRFILTER)             += iirfilter
TESTPROGS-$(HAVE_MMX)                     += motion
TESTPROGS-$(CONFIG_GOLOMB)                += golomb
TESTPROGS-$(CONFIG_RANGECODER)            += rangecoder
TESTPROGS-$(CONFIG_SNOW_ENCODER)          += snowenc
TESTPROGS = cabac                                                       \
            fft                                                         \
            fft-fixed                                                   \
            fft-fixed32                                                 \
            golomb                                                      \
            iirfilter                                                   \
            imgconvert                                                  \
            rangecoder                                                  \
            snowenc                                                     \

TESTPROGS-$(CONFIG_DCT) += dct
TESTPROGS-$(HAVE_MMX) += motion
TESTOBJS = dctref.o

TOOLS = fourcc2pixfmt

HOSTPROGS = aac_tablegen                                                \
            aacps_tablegen                                              \
            aacsbr_tablegen                                             \
            cabac_tablegen                                              \
            cbrt_tablegen                                               \
            cos_tablegen                                                \
            dsd_tablegen                                                \
            dv_tablegen                                                 \
            motionpixels_tablegen                                       \
            mpegaudio_tablegen                                          \
@@ -902,7 +876,7 @@ else
$(SUBDIR)%_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DCONFIG_SMALL=0
endif

GEN_HEADERS = cabac_tables.h cbrt_tables.h aacps_tables.h aacsbr_tables.h aac_tables.h dsd_tables.h dv_tables.h     \
GEN_HEADERS = cbrt_tables.h aacps_tables.h aac_tables.h dv_tables.h     \
              sinewin_tables.h mpegaudio_tables.h motionpixels_tables.h \
              pcm_tables.h qdm2_tables.h
GEN_HEADERS := $(addprefix $(SUBDIR), $(GEN_HEADERS))
@@ -913,10 +887,7 @@ $(GEN_HEADERS): $(SUBDIR)%_tables.h: $(SUBDIR)%_tablegen$(HOSTEXESUF)
ifdef CONFIG_HARDCODED_TABLES
$(SUBDIR)aacdec.o: $(SUBDIR)cbrt_tables.h
$(SUBDIR)aacps.o: $(SUBDIR)aacps_tables.h
$(SUBDIR)aacsbr.o: $(SUBDIR)aacsbr_tables.h
$(SUBDIR)aactab.o: $(SUBDIR)aac_tables.h
$(SUBDIR)cabac.o: $(SUBDIR)cabac_tables.h
$(SUBDIR)dsddec.o: $(SUBDIR)dsd_tables.h
$(SUBDIR)dvenc.o: $(SUBDIR)dv_tables.h
$(SUBDIR)sinewin.o: $(SUBDIR)sinewin_tables.h
$(SUBDIR)mpegaudiodec_fixed.o: $(SUBDIR)mpegaudio_tables.h
 
@@ -28,7 +28,6 @@
#include "a64tables.h"
#include "elbg.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

@@ -66,7 +65,7 @@ static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
//static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest)
static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
{
    int blockx, blocky, x, y;
    int luma = 0;
@@ -79,13 +78,9 @@ static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest
            for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
                for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
                    if(x < width && y < height) {
                        if (x + 1 < width) {
                            /* build average over 2 pixels */
                            luma = (src[(x + 0 + y * p->linesize[0])] +
                                    src[(x + 1 + y * p->linesize[0])]) / 2;
                        } else {
                            luma = src[(x + y * p->linesize[0])];
                        }
                        /* build average over 2 pixels */
                        luma = (src[(x + 0 + y * p->linesize[0])] +
                                src[(x + 1 + y * p->linesize[0])]) / 2;
                        /* write blocks as linear data now so they are suitable for elbg */
                        dest[0] = luma;
                    }
@@ -191,11 +186,12 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    av_freep(&c->mc_meta_charset);
    av_freep(&c->mc_best_cb);
    av_freep(&c->mc_charset);
    av_freep(&c->mc_charmap);
    av_freep(&c->mc_colram);
    av_frame_free(&avctx->coded_frame);
    av_free(c->mc_meta_charset);
    av_free(c->mc_best_cb);
    av_free(c->mc_charset);
    av_free(c->mc_charmap);
    av_free(c->mc_colram);
    return 0;
}

@@ -224,9 +220,9 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
                           a64_palette[mc_colors[a]][2] * 0.11;
    }

    if (!(c->mc_meta_charset = av_mallocz_array(c->mc_lifetime, 32000 * sizeof(int))) ||
    if (!(c->mc_meta_charset = av_malloc(32000 * c->mc_lifetime * sizeof(int))) ||
       !(c->mc_best_cb       = av_malloc(CHARSET_CHARS * 32 * sizeof(int)))     ||
       !(c->mc_charmap       = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int))) ||
       !(c->mc_charmap       = av_mallocz(1000 * c->mc_lifetime * sizeof(int))) ||
       !(c->mc_colram        = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)))     ||
       !(c->mc_charset       = av_malloc(0x800 * (INTERLACED+1) * sizeof(uint8_t)))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
@@ -242,6 +238,14 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
    AV_WB32(avctx->extradata, c->mc_lifetime);
    AV_WB32(avctx->extradata + 16, INTERLACED);

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        a64multi_close_encoder(avctx);
        return AVERROR(ENOMEM);
    }

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
    if (!avctx->codec_tag)
         avctx->codec_tag = AV_RL32("a64m");

@@ -266,9 +270,10 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 | 
			
		||||
                                 const AVFrame *p, int *got_packet)
 | 
			
		||||
                                 const AVFrame *pict, int *got_packet)
 | 
			
		||||
{
 | 
			
		||||
    A64Context *c = avctx->priv_data;
 | 
			
		||||
    AVFrame *const p = avctx->coded_frame;
 | 
			
		||||
 | 
			
		||||
    int frame;
 | 
			
		||||
    int x, y;
 | 
			
		||||
@@ -299,7 +304,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /* no data, means end encoding asap */
 | 
			
		||||
    if (!p) {
 | 
			
		||||
    if (!pict) {
 | 
			
		||||
        /* all done, end encoding */
 | 
			
		||||
        if (!c->mc_lifetime) return 0;
 | 
			
		||||
        /* no more frames in queue, prepare to flush remaining frames */
 | 
			
		||||
@@ -312,10 +317,13 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 | 
			
		||||
    } else {
 | 
			
		||||
        /* fill up mc_meta_charset with data until lifetime exceeds */
 | 
			
		||||
        if (c->mc_frame_counter < c->mc_lifetime) {
 | 
			
		||||
            *p = *pict;
 | 
			
		||||
            p->pict_type = AV_PICTURE_TYPE_I;
 | 
			
		||||
            p->key_frame = 1;
 | 
			
		||||
            to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
 | 
			
		||||
            c->mc_frame_counter++;
 | 
			
		||||
            if (c->next_pts == AV_NOPTS_VALUE)
 | 
			
		||||
                c->next_pts = p->pts;
 | 
			
		||||
                c->next_pts = pict->pts;
 | 
			
		||||
            /* lifetime is not reached so wait for next frame first */
 | 
			
		||||
            return 0;
 | 
			
		||||
        }
 | 
			
		||||
@@ -326,8 +334,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 | 
			
		||||
        req_size = 0;
 | 
			
		||||
        /* any frames to encode? */
 | 
			
		||||
        if (c->mc_lifetime) {
 | 
			
		||||
            int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
 | 
			
		||||
            if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size)) < 0)
 | 
			
		||||
            req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
 | 
			
		||||
            if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
 | 
			
		||||
                return ret;
 | 
			
		||||
            buf = pkt->data;
 | 
			
		||||
 | 
			
		||||
@@ -343,7 +351,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 | 
			
		||||
 | 
			
		||||
            /* advance pointers */
 | 
			
		||||
            buf      += charset_size;
 | 
			
		||||
            req_size += charset_size;
 | 
			
		||||
            charset  += charset_size;
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        /* write x frames to buf */
 | 
			
		||||
@@ -380,7 +388,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 | 
			
		||||
        pkt->pts = pkt->dts = c->next_pts;
 | 
			
		||||
        c->next_pts         = AV_NOPTS_VALUE;
 | 
			
		||||
 | 
			
		||||
        av_assert0(pkt->size >= req_size);
 | 
			
		||||
        pkt->size   = req_size;
 | 
			
		||||
        pkt->flags |= AV_PKT_FLAG_KEY;
 | 
			
		||||
        *got_packet = !!req_size;
 | 
			
		||||
 
 | 
			
		||||
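
Aside on the allocation hunk above: one side multiplies the element count into the size by hand (32000 * c->mc_lifetime * sizeof(int)), the other calls av_mallocz_array(), whose point is that an oversized count cannot overflow the multiplication and silently shrink the buffer. A minimal sketch of that overflow check, for illustration only (not libavutil's implementation, which has its own allocator and limits):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative calloc-style helper: returns NULL instead of wrapping around
 * when nmemb * size does not fit in size_t, mirroring the guarantee that
 * av_mallocz_array() provides. */
static void *mallocz_array_sketch(size_t nmemb, size_t size)
{
    void *p;
    if (size && nmemb > SIZE_MAX / size)  /* nmemb * size would overflow */
        return NULL;
    p = malloc(nmemb * size);
    if (p)
        memset(p, 0, nmemb * size);
    return p;
}
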
@@ -57,7 +57,7 @@ static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,

    if (avpriv_aac_parse_header(&gb, &hdr) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error parsing ADTS frame header!\n");
        return AVERROR_INVALIDDATA;
        return -1;
    }

    if (!hdr.crc_absent && hdr.num_aac_frames > 1) {
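
The only difference in the hunk above is the value returned on a failed ADTS header parse: a bare -1 on one side, the specific AVERROR_INVALIDDATA code on the other. A small illustrative caller showing why the specific code is more useful (parse_header_stub is a made-up stand-in; AVERROR_INVALIDDATA and av_strerror() are the real libavutil names):

#include <stdio.h>
#include "libavutil/error.h"  /* AVERROR_INVALIDDATA, av_strerror() */

/* made-up stand-in for a header parse that can fail */
static int parse_header_stub(int looks_valid)
{
    return looks_valid ? 0 : AVERROR_INVALIDDATA;
}

static void report(int err)
{
    char msg[64];
    /* a specific error code can be mapped back to a readable message,
     * which a bare -1 cannot */
    if (err < 0 && av_strerror(err, msg, sizeof(msg)) == 0)
        fprintf(stderr, "header error: %s\n", msg);
}

int main(void)
{
    report(parse_header_stub(0));
    return 0;
}
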
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
    int size;
    union {
        uint64_t u64;
        uint8_t  u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
        uint8_t  u8[8];
    } tmp;

    tmp.u64 = av_be2ne64(state);
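
One variant of the scratch union above reserves FF_INPUT_BUFFER_PADDING_SIZE extra bytes. Bit readers in libavcodec may fetch whole 32- or 64-bit words at a time, so buffers handed to them are expected to carry that much zeroed padding after the payload; a small illustrative helper (the function name is invented) that honours the convention for an 8-byte header:

#include <stdint.h>
#include <string.h>
#include "libavcodec/avcodec.h"  /* FF_INPUT_BUFFER_PADDING_SIZE */

/* invented helper: copy an 8-byte header into a padded scratch buffer
 * before running a bit reader over it; scratch must hold at least
 * 8 + FF_INPUT_BUFFER_PADDING_SIZE bytes */
static void copy_with_padding(uint8_t *scratch, const uint8_t *header)
{
    memcpy(scratch, header, 8);
    memset(scratch + 8, 0, FF_INPUT_BUFFER_PADDING_SIZE); /* deterministic tail */
}
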
@@ -31,7 +31,7 @@
#include "libavutil/mathematics.h"
float ff_aac_pow2sf_tab[428];

av_cold void ff_aac_tableinit(void)
void ff_aac_tableinit(void)
{
    int i;
    for (i = 0; i < 428; i++)

@@ -31,7 +31,7 @@ int avpriv_aac_parse_header(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
    int size, rdb, ch, sr;
    int aot, crc_abs;

    if (get_bits(gbc, 12) != 0xfff)
    if(get_bits(gbc, 12) != 0xfff)
        return AAC_AC3_PARSE_ERROR_SYNC;

    skip_bits1(gbc);             /* id */
@@ -39,10 +39,10 @@ int avpriv_aac_parse_header(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
    crc_abs = get_bits1(gbc);    /* protection_absent */
    aot     = get_bits(gbc, 2);  /* profile_objecttype */
    sr      = get_bits(gbc, 4);  /* sample_frequency_index */
    if (!avpriv_mpeg4audio_sample_rates[sr])
    if(!avpriv_mpeg4audio_sample_rates[sr])
        return AAC_AC3_PARSE_ERROR_SAMPLE_RATE;
    skip_bits1(gbc);             /* private_bit */
    ch = get_bits(gbc, 3);       /* channel_configuration */
    ch      = get_bits(gbc, 3);  /* channel_configuration */

    skip_bits1(gbc);             /* original/copy */
    skip_bits1(gbc);             /* home */
@@ -50,8 +50,8 @@ int avpriv_aac_parse_header(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
    /* adts_variable_header */
    skip_bits1(gbc);             /* copyright_identification_bit */
    skip_bits1(gbc);             /* copyright_identification_start */
    size = get_bits(gbc, 13);    /* aac_frame_length */
    if (size < AAC_ADTS_HEADER_SIZE)
    size    = get_bits(gbc, 13); /* aac_frame_length */
    if(size < AAC_ADTS_HEADER_SIZE)
        return AAC_AC3_PARSE_ERROR_FRAME_SIZE;

    skip_bits(gbc, 11);          /* adts_buffer_fullness */
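
For reference, the fields read above follow the ADTS header layout: a 12-bit syncword, 1-bit ID, 2-bit layer, 1-bit protection_absent, 2-bit profile, 4-bit sampling_frequency_index, 1-bit private_bit and 3-bit channel_configuration, then the original/copy and home bits, and finally the variable header with the 13-bit aac_frame_length. A self-contained sketch of the initial sync check using plain shifts instead of GetBitContext (illustrative only, not the parser's code):

#include <stdint.h>

/* Check the first two ADTS header bytes: 12-bit syncword (0xFFF),
 * 1-bit ID, 2-bit layer (always 0), 1-bit protection_absent. */
static int adts_sync_ok(const uint8_t *b)
{
    unsigned syncword = (b[0] << 4) | (b[1] >> 4);
    unsigned layer    = (b[1] >> 1) & 0x3;
    return syncword == 0xfff && layer == 0;
}
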
@@ -53,7 +53,7 @@ static const uint8_t run_value_bits_short[16] = {
    3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 9
};

static const uint8_t * const run_value_bits[2] = {
static const uint8_t *run_value_bits[2] = {
    run_value_bits_long, run_value_bits_short
};

@@ -776,6 +776,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
        do {
            int prev = -1;
            tbits = 0;
            fflag = 0;
            for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
                start = w*128;
                for (g = 0;  g < sce->ics.num_swb; g++) {
@@ -952,6 +953,7 @@ static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
            }
            sce->zeroes[w*16+g] = 0;
            scf  = prev_scf = av_clip(SCALE_ONE_POS - SCALE_DIV_512 - log2f(1/maxq[w*16+g])*16/3, 60, 218);
            step = 16;
            for (;;) {
                float dist = 0.0f;
                int quant_max;
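
The run_value_bits declarations above differ only in one const qualifier, but the two forms mean different things; a tiny standalone example with shortened placeholder tables (the real ones have 16 entries):

#include <stdint.h>

static const uint8_t bits_long[2]  = { 5, 5 };  /* placeholder data */
static const uint8_t bits_short[2] = { 3, 3 };  /* placeholder data */

/* const uint8_t *tab[2]        : pointed-to bytes are const, but the two
 *                                slots could still be reassigned at run time
 * const uint8_t * const tab[2] : the slots are const as well, so the whole
 *                                table is immutable and can be placed in
 *                                read-only storage                        */
static const uint8_t * const run_tab[2] = { bits_long, bits_short };
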
@@ -425,7 +425,7 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
 * Save current output configuration if and only if it has been locked.
 */
static void push_output_configuration(AACContext *ac) {
    if (ac->oc[1].status == OC_LOCKED || ac->oc[0].status == OC_NONE) {
    if (ac->oc[1].status == OC_LOCKED) {
        ac->oc[0] = ac->oc[1];
    }
    ac->oc[1].status = OC_NONE;
@@ -881,7 +881,7 @@ static int decode_eld_specific_config(AACContext *ac, AVCodecContext *avctx,
        if (len == 15 + 255)
            len += get_bits(gb, 16);
        if (get_bits_left(gb) < len * 8 + 4) {
            av_log(avctx, AV_LOG_ERROR, overread_err);
            av_log(ac->avctx, AV_LOG_ERROR, overread_err);
            return AVERROR_INVALIDDATA;
        }
        skip_bits_long(gb, 8 * len);
@@ -1946,7 +1946,7 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
            avpriv_request_sample(ac->avctx, "SSR");
            return AVERROR_PATCHWELCOME;
        }
        // I see no textual basis in the spec for this occurring after SSR gain
        // I see no textual basis in the spec for this occuring after SSR gain
        // control, but this is what both reference and real implmentations do
        if (tns->present && er_syntax)
            if (decode_tns(ac, tns, gb, ics) < 0)
@@ -2631,7 +2631,7 @@ static void apply_dependent_coupling(AACContext *ac,
                const float gain = cce->coup.gain[index][idx];
                for (group = 0; group < ics->group_len[g]; group++) {
                    for (k = offsets[i]; k < offsets[i + 1]; k++) {
                        // FIXME: SIMDify
                        // XXX dsputil-ize
                        dest[group * 128 + k] += gain * src[group * 128 + k];
                    }
                }
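
The inner loop of apply_dependent_coupling shown above is, apart from the comment wording, a per-band scaled accumulation: the coupling channel's spectral coefficients are multiplied by a gain and added into the target channel. Reduced to its core (illustrative helper, not the decoder's own function):

/* add n coefficients of src, scaled by gain, into dest */
static void mix_coupling(float *dest, const float *src, float gain, int n)
{
    int k;
    for (k = 0; k < n; k++)
        dest[k] += gain * src[k];
}
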
@@ -165,7 +165,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
    PutBitContext pb;
    AACEncContext *s = avctx->priv_data;

    init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
    init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
    put_bits(&pb, 5, 2); //object type - AAC-LC
    put_bits(&pb, 4, s->samplerate_index); //sample rate index
    put_bits(&pb, 4, s->channels);
@@ -705,8 +705,8 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
{
    int ch;
    FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->buffer.samples, s->channels, 3 * 1024 * sizeof(s->buffer.samples[0]), alloc_fail);
    FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->cpe, s->chan_map[0], sizeof(ChannelElement), alloc_fail);
    FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 3 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
    FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail);
    FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail);

    for(ch = 0; ch < s->channels; ch++)
@@ -746,10 +746,10 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)

    s->chan_map = aac_chan_configs[s->channels-1];

    if ((ret = dsp_init(avctx, s)) < 0)
    if (ret = dsp_init(avctx, s))
        goto fail;

    if ((ret = alloc_buffers(avctx, s)) < 0)
    if (ret = alloc_buffers(avctx, s))
        goto fail;

    avctx->extradata_size = 5;
@@ -761,8 +761,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
    lengths[1] = ff_aac_num_swb_128[i];
    for (i = 0; i < s->chan_map[0]; i++)
        grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
    if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
                           s->chan_map[0], grouping)) < 0)
    if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping))
        goto fail;
    s->psypp = ff_psy_preprocess_init(avctx);
    s->coder = &ff_aac_coders[s->options.aac_coder];
@@ -770,7 +769,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
    if (HAVE_MIPSDSPR1)
        ff_aac_coder_init_mips(s);

    s->lambda = avctx->global_quality > 0 ? avctx->global_quality : 120;
    s->lambda = avctx->global_quality ? avctx->global_quality : 120;

    ff_aac_tableinit();

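
The init hunks above differ only in how return values are tested: one side writes if ((ret = ...) < 0), the other if (ret = ...). The relevant convention is that libav* functions return zero or a positive value on success and a negative AVERROR code on failure, so only the < 0 form treats positive success values correctly, and the extra parentheses make the intentional assignment explicit. A minimal illustration (init_step is an invented stand-in):

#include <errno.h>
#include "libavutil/error.h"  /* AVERROR() */

/* invented stand-in for an init step following the libav* convention:
 * >= 0 on success, negative AVERROR code on failure */
static int init_step(int ok)
{
    return ok ? 0 : AVERROR(ENOMEM);
}

static int init_all(void)
{
    int ret;
    if ((ret = init_step(1)) < 0)  /* parentheses: assignment is intended */
        return ret;
    return 0;
}
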
@@ -82,7 +82,7 @@ int main(void)
    write_float_3d_array(f34_2_4, 4, 8, 2);
    printf("};\n");

    printf("static const DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2] = {\n");
    printf("static TABLE_CONST DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2] = {\n");
    write_float_4d_array(Q_fract_allpass, 2, 50, 3, 2);
    printf("};\n");
    printf("static const DECLARE_ALIGNED(16, float, phi_fract)[2][50][2] = {\n");
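
In the table-generator hunk above, one side prints const and the other a TABLE_CONST macro. The usual reason for such a macro, and presumably the one here, is that tables baked in at build time can be declared const while tables filled in at run time cannot; a sketch of that pattern (the CONFIG_HARDCODED_TABLES condition is an assumption about this tree, check the table generator's header for the real definition):

/* sketch: qualifier only when the table contents are compiled in */
#if CONFIG_HARDCODED_TABLES
#   define TABLE_CONST const
#else
#   define TABLE_CONST
#endif

/* read-only with hardcoded tables, writable when filled at run time */
TABLE_CONST float example_table[4];
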
@@ -70,7 +70,7 @@ static const float g2_Q4[] = {
     0.16486303567403f,  0.23279856662996f, 0.25f
};

static av_cold void make_filters_from_proto(float (*filter)[8][2], const float *proto, int bands)
static void make_filters_from_proto(float (*filter)[8][2], const float *proto, int bands)
{
    int q, n;
    for (q = 0; q < bands; q++) {
@@ -82,7 +82,7 @@ static av_cold void make_filters_from_proto(float (*filter)[8][2], const float *
    }
}

static av_cold void ps_tableinit(void)
static void ps_tableinit(void)
{
    static const float ipdopd_sin[] = { 0, M_SQRT1_2, 1,  M_SQRT1_2,  0, -M_SQRT1_2, -1, -M_SQRT1_2 };
    static const float ipdopd_cos[] = { 1, M_SQRT1_2, 0, -M_SQRT1_2, -1, -M_SQRT1_2,  0,  M_SQRT1_2 };
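
The remaining hunks mostly add or drop the av_cold attribute on one-time setup functions such as make_filters_from_proto() and ps_tableinit(). On GCC-style compilers av_cold marks a function as rarely executed so the compiler can optimize it for size and keep it out of hot code paths; a standalone sketch of the idea with a locally defined macro (LOCAL_COLD is a made-up name, the real definition lives in libavutil/attributes.h):

#if defined(__GNUC__)
#   define LOCAL_COLD __attribute__((cold))
#else
#   define LOCAL_COLD
#endif

/* init-only work: fine to optimize for size rather than speed */
static LOCAL_COLD void init_tables(void)
{
    /* fill lookup tables here */
}
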
Some files were not shown because too many files have changed in this diff.