Integrate audio and video encoding/decoding functionality

master
a7458969 2020-05-11 23:35:53 +08:00
parent f8ceeeca45
commit 16c2747b11
536 changed files with 209464 additions and 2 deletions


@ -0,0 +1,25 @@
const BYTE Base64IdxTab[128] =
{
255,255,255,255, 255,255,255,255, 255,255,255,255, 255,255,255,255,
255,255,255,255, 255,255,255,255, 255,255,255,255, 255,255,255,255,
255,255,255,255, 255,255,255,255, 255,255,255,62, 255,255,255,63,
52,53,54,55, 56,57,58,59, 60,61,255,255, 255,255,255,255,
255,0,1,2, 3,4,5,6, 7,8,9,10, 11,12,13,14,
15,16,17,18, 19,20,21,22, 23,24,25,255, 255,255,255,255,
255,26,27,28, 29,30,31,32, 33,34,35,36, 37,38,39,40,
41,42,43,44, 45,46,47,48, 49,50,51,255, 255,255,255,255
};
#define BVal(x) Base64IdxTab[x]
int DecodeBase64(char * pInput, char * pOutput);
const BYTE Base64ValTab[65] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
#define AVal(x) Base64ValTab[x]
int EncodeBase64(BYTE * pInput, int iInputLen, BYTE * pOutput);
#define DCD_ONCE_LEN (400*1024)
#define CDC_ONCE_LEN (300*1024)
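// Illustrative sketch (an assumption, not part of the original header): how the BVal()
// lookup table above is typically used to decode 4 Base64 characters into up to 3 bytes.
// The real DecodeBase64/EncodeBase64 declared here are defined elsewhere in the project;
// this assumes well-formed Base64 input ('=' padding maps to 255 in the table).
static int DecodeBase64Sketch(const char *pInput, int iInLen, BYTE *pOutput)
{
    int iOut = 0;
    for (int i = 0; i + 3 < iInLen; i += 4) {
        BYTE b0 = BVal((BYTE)pInput[i]),     b1 = BVal((BYTE)pInput[i + 1]);
        BYTE b2 = BVal((BYTE)pInput[i + 2]), b3 = BVal((BYTE)pInput[i + 3]);
        pOutput[iOut++] = (BYTE)((b0 << 2) | (b1 >> 4));                // 6 + 2 bits
        if (b2 != 255) pOutput[iOut++] = (BYTE)((b1 << 4) | (b2 >> 2)); // 4 + 4 bits
        if (b3 != 255) pOutput[iOut++] = (BYTE)((b2 << 6) | b3);        // 2 + 6 bits
    }
    return iOut; // number of decoded bytes
}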


@ -0,0 +1,181 @@
// BitmapEx.h: interface for the CBitmapEx class.
//
//////////////////////////////////////////////////////////////////////
#if !defined(AFX_BITMAPEX_H__80F20A52_B43F_42C5_B182_AC8D27BF5C0E__INCLUDED_)
#define AFX_BITMAPEX_H__80F20A52_B43F_42C5_B182_AC8D27BF5C0E__INCLUDED_
#if _MSC_VER > 1000
#pragma once
#endif // _MSC_VER > 1000
#define _PI 3.1415926f // Value of PI
#define _BITS_PER_PIXEL_32 32 // 32-bit color depth
#define _BITS_PER_PIXEL_24 24 // 24-bit color depth
#define _PIXEL DWORD // Pixel
#define _RGB(r,g,b) (((r) << 16) | ((g) << 8) | (b)) // Convert to RGB
#define _GetRValue(c) ((BYTE)(((c) & 0x00FF0000) >> 16)) // Red color component
#define _GetGValue(c) ((BYTE)(((c) & 0x0000FF00) >> 8)) // Green color component
#define _GetBValue(c) ((BYTE)((c) & 0x000000FF)) // Blue color component
typedef long fixed; // Our new fixed point type
#define itofx(x) ((x) << 8) // Integer to fixed point
#define ftofx(x) (long)((x) * 256) // Float to fixed point
#define dtofx(x) (long)((x) * 256) // Double to fixed point
#define fxtoi(x) ((x) >> 8) // Fixed point to integer
#define fxtof(x) ((float) (x) / 256) // Fixed point to float
#define fxtod(x) ((double)(x) / 256) // Fixed point to double
#define Mulfx(x,y) (((x) * (y)) >> 8) // Multiply a fixed by a fixed
#define Divfx(x,y) (((x) << 8) / (y)) // Divide a fixed by a fixed
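// Worked example (illustrative): with this 24.8 fixed-point layout, itofx(3) == 768,
// ftofx(1.5f) == 384, Mulfx(768, 384) == 1152, fxtof(1152) == 4.5f and fxtoi(1152) == 4
// (the fractional part is truncated by the right shift).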
typedef struct __POINT
{
long x;
long y;
} _POINT, *_LPPOINT;
typedef struct __QUAD
{
_POINT p1;
_POINT p2;
_POINT p3;
_POINT p4;
} _QUAD, *_LPQUAD;
typedef enum __RESAMPLE_MODE
{
RM_NEARESTNEIGHBOUR = 0,
RM_BILINEAR,
RM_BICUBIC,
} _RESAMPLE_MODE;
typedef enum __GRADIENT_MODE
{
GM_NONE = 0x00,
GM_HORIZONTAL = 0x01,
GM_VERTICAL = 0x02,
GM_RADIAL = 0x04
} _GRADIENT_MODE;
class CBitmapEx
{
public:
// Public methods
CBitmapEx();
virtual ~CBitmapEx();
void Create(long width, long height);
void Create(CBitmapEx& bitmapEx);
void Load(LPTSTR lpszBitmapFile);
void Load(LPBYTE lpBitmapData);
void Save(LPTSTR lpszBitmapFile);
void Save(LPBYTE lpBitmapData);
void Scale(long horizontalPercent=100, long verticalPercent=100);
void Rotate(long degrees=0, _PIXEL bgColor=_RGB(0,0,0));
void FlipHorizontal();
void FlipVertical();
void MirrorLeft();
void MirrorRight();
void MirrorTop();
void MirrorBottom();
void Clear(_PIXEL clearColor=_RGB(0,0,0));
void Negative();
void Grayscale();
void Sepia(long depth=34);
void Emboss();
void Engrave();
void Pixelize(long size=4);
void Draw(HDC hDC);
void Draw(HDC hDC, long dstX, long dstY);
void Draw(long dstX, long dstY, long width, long height, CBitmapEx& bitmapEx, long srcX, long srcY);
void Draw(long dstX, long dstY, long width, long height, CBitmapEx& bitmapEx, long srcX, long srcY, long alpha);
void Draw(_QUAD dstQuad, CBitmapEx& bitmapEx);
void Draw(_QUAD dstQuad, CBitmapEx& bitmapEx, long alpha);
void Draw(_QUAD dstQuad, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight);
void Draw(_QUAD dstQuad, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha);
void Draw(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight);
void Draw(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha);
void DrawTransparent(long dstX, long dstY, long width, long height, CBitmapEx& bitmapEx, long srcX, long srcY, _PIXEL transparentColor=_RGB(0,0,0));
void DrawTransparent(long dstX, long dstY, long width, long height, CBitmapEx& bitmapEx, long srcX, long srcY, long alpha, _PIXEL transparentColor=_RGB(0,0,0));
void DrawTransparent(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, _PIXEL transparentColor=_RGB(0,0,0));
void DrawTransparent(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha, _PIXEL transparentColor=_RGB(0,0,0));
void DrawTransparent(_QUAD dstQuad, CBitmapEx& bitmapEx, _PIXEL transparentColor=_RGB(0,0,0));
void DrawTransparent(_QUAD dstQuad, CBitmapEx& bitmapEx, long alpha, _PIXEL transparentColor=_RGB(0,0,0));
void DrawTransparent(_QUAD dstQuad, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, _PIXEL transparentColor=_RGB(0,0,0));
void DrawTransparent(_QUAD dstQuad, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha, _PIXEL transparentColor=_RGB(0,0,0));
void DrawBlended(long dstX, long dstY, long width, long height, CBitmapEx& bitmapEx, long srcX, long srcY, long startAlpha, long endAlpha, DWORD mode=GM_NONE);
void DrawBlended(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long startAlpha, long endAlpha, DWORD mode=GM_NONE);
LPBITMAPFILEHEADER GetFileInfo() {return &m_bfh;}
LPBITMAPINFOHEADER GetInfo() {return &m_bih;}
long GetWidth() {return m_bih.biWidth;}
long GetHeight() {return m_bih.biHeight;}
long GetPitch() {return m_iPitch;}
long GetBpp() {return m_iBpp;}
long GetPaletteEntries() {return m_iPaletteEntries;}
LPRGBQUAD GetPalette() {return m_lpPalette;}
DWORD GetSize() {return m_dwSize;}
LPBYTE GetData() {return m_lpData;}
void SetResampleMode(_RESAMPLE_MODE mode=RM_NEARESTNEIGHBOUR) {m_ResampleMode = mode;}
_RESAMPLE_MODE GetResampleMode() {return m_ResampleMode;}
BOOL IsValid() {return (m_dwSize > 0);}
_PIXEL GetPixel(long x, long y);
void SetPixel(long x, long y, _PIXEL pixel);
//wangjun
void LoadImageFile(LPTSTR lpszImageFile);
void SaveJPGFile(LPTSTR lpszImageFile);
private:
// Private methods
void _ConvertTo32Bpp();
void _ConvertTo24Bpp();
void _ScaleNearestNeighbour(long horizontalPercent, long verticalPercent);
void _ScaleBilinear(long horizontalPercent, long verticalPercent);
void _ScaleBicubic(long horizontalPercent, long verticalPercent);
void _RotateNearestNeighbour(long degrees, _PIXEL bgColor);
void _RotateBilinear(long degrees, _PIXEL bgColor);
void _RotateBicubic(long degrees, _PIXEL bgColor);
void _DrawNearestNeighbour(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight);
void _DrawBilinear(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight);
void _DrawBicubic(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight);
void _DrawNearestNeighbour(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha);
void _DrawBilinear(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha);
void _DrawBicubic(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha);
void _DrawTransparentNearestNeighbour(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, _PIXEL transparentColor=_RGB(0,0,0));
void _DrawTransparentBilinear(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, _PIXEL transparentColor=_RGB(0,0,0));
void _DrawTransparentBicubic(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, _PIXEL transparentColor=_RGB(0,0,0));
void _DrawTransparentNearestNeighbour(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha, _PIXEL transparentColor=_RGB(0,0,0));
void _DrawTransparentBilinear(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha, _PIXEL transparentColor=_RGB(0,0,0));
void _DrawTransparentBicubic(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long alpha, _PIXEL transparentColor=_RGB(0,0,0));
void _DrawBlendedNearestNeighbour(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long startAlpha, long endAlpha, DWORD mode=GM_NONE);
void _DrawBlendedBilinear(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long startAlpha, long endAlpha, DWORD mode=GM_NONE);
void _DrawBlendedBicubic(long dstX, long dstY, long dstWidth, long dstHeight, CBitmapEx& bitmapEx, long srcX, long srcY, long srcWidth, long srcHeight, long startAlpha, long endAlpha, DWORD mode=GM_NONE);
// wangjun
HANDLE _dibFromBitmap(HBITMAP hBitmap); //DDB->DIB
int _DIBNumColors (LPBITMAPINFOHEADER lpbi);
HBITMAP _extractBitmap(IPicture* pPicture);
int _GetCodecClsid(const WCHAR* format, CLSID* pClsid);
private:
// Private members
BITMAPFILEHEADER m_bfh;
BITMAPINFOHEADER m_bih;
long m_iPaletteEntries;
RGBQUAD m_lpPalette[256];
long m_iPitch;
long m_iBpp;
DWORD m_dwSize;
LPBYTE m_lpData;
_RESAMPLE_MODE m_ResampleMode;
};
#endif // !defined(AFX_BITMAPEX_H__80F20A52_B43F_42C5_B182_AC8D27BF5C0E__INCLUDED_)


@ -0,0 +1,11 @@
#pragma once
#include <string>
#include <memory>
using namespace std;
string GetGUID();
std::wstring s2ws(const std::string& s);
string WideCharToMultiChar(wstring str);
string Utf8ToAnsi(const char *UTF8);
wstring char2wchar(const char* cchar);

File diff suppressed because it is too large


@ -0,0 +1,145 @@
#include "AACAudioCoder.h"
//#include "Debuger.h"
using namespace AAC_CODER;
AACAudioCoder::~AACAudioCoder() {
}
void AACAudioCoder::OnAudioData(const void *frameaddress, uint32_t framelen)
{
this->Encode((unsigned char *)frameaddress, framelen * 4);
}
AACAudioCoder::AACAudioCoder(unsigned int smprate, unsigned int channel) {
AVCodecID codec_id = AV_CODEC_ID_AAC;
pCodec = avcodec_find_encoder_by_name("libfdk_aac");
if (!pCodec) {
printf("Codec not found\n");
this->mStatus = FAIL;
}
mCodecCtx = avcodec_alloc_context3(pCodec);
if (!mCodecCtx) {
printf("Could not allocate video codec context\n");
this->mStatus = FAIL;
}
mCodecCtx->codec_id = pCodec->id;
mCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
mCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16; // signed 16-bit interleaved PCM (what libfdk_aac expects)
mCodecCtx->sample_rate = 44100;
mCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
mCodecCtx->channels = 2;
mCodecCtx->bit_rate = 640000;
mCodecCtx->time_base.den = 1;
mCodecCtx->time_base.num = 23;
mCodecCtx->frame_size = 1024;
this->mObserver = nullptr;
if (avcodec_open2(mCodecCtx, pCodec, NULL) < 0) {
this->mStatus = FAIL;
}
mFrame = av_frame_alloc();
mFrame->nb_samples = mCodecCtx->frame_size;
mFrame->format = mCodecCtx->sample_fmt;
int size = av_samples_get_buffer_size(NULL, mCodecCtx->channels, mCodecCtx->frame_size, mCodecCtx->sample_fmt, 1);
mFrameBuf = (uint8_t *)av_malloc(size);
avcodec_fill_audio_frame(mFrame, mCodecCtx->channels, mCodecCtx->sample_fmt, (const uint8_t*)mFrameBuf, size, 1);
mPts = 0;
}
int adts_sample_rates[]={96000,88200,64000,48000,44100,32000,24000,22050,16000,12000,11025,8000,7350,0,0,0};
int FindAdstSRIndex(int samplerate)
{
int i;
for (i = 0; i < 16; i++)
{
if (samplerate == adts_sample_rates[i])
return i;
}
return 16 - 1;
}
#define ADTS_HEAD_LEN 7
void MakeAdtsHeader(unsigned char *data, int samplerate, int channels, int iFrameLen)
{
int profile = 2; // AAC LC (MediaCodecInfo.CodecProfileLevel.AACObjectLC)
int freqIdx = 4; // 4 = 44.1 kHz, index into avpriv_mpeg4audio_sample_rates below (taken from the FFmpeg sources)
int chanCfg = channels; // channel_configuration, see the table below (2 = stereo)
/*int avpriv_mpeg4audio_sample_rates[] = {
96000, 88200, 64000, 48000, 44100, 32000,
24000, 22050, 16000, 12000, 11025, 8000, 7350
};
channel_configuration: chanCfg
0: Defined in AOT Specifc Config
1: 1 channel: front-center
2: 2 channels: front-left, front-right
3: 3 channels: front-center, front-left, front-right
4: 4 channels: front-center, front-left, front-right, back-center
5: 5 channels: front-center, front-left, front-right, back-left, back-right
6: 6 channels: front-center, front-left, front-right, back-left, back-right, LFE-channel
7: 8 channels: front-center, front-left, front-right, side-left, side-right, back-left, back-right, LFE-channel
8-15: Reserved
*/
// fill in ADTS data
data[0] = (uint8_t)0xFF;
data[1] = (uint8_t)0xF9;
data[2] = (uint8_t)(((profile - 1) << 6) + (freqIdx << 2) + (chanCfg >> 2));
data[3] = (uint8_t)(((chanCfg & 3) << 6) + (iFrameLen >> 11));
data[4] = (uint8_t)((iFrameLen & 0x7FF) >> 3);
data[5] = (uint8_t)(((iFrameLen & 7) << 5) + 0x1F);
data[6] = (uint8_t)0xFC;
}
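// Illustrative usage sketch (an assumption, not called by the original code): a raw AAC
// frame only becomes a playable .aac stream if every frame is prefixed with the 7-byte
// ADTS header built above; the 13-bit frame length field must include the header itself.
static void WriteAdtsFrameSketch(FILE *fp, const unsigned char *aacData, int aacLen)
{
    unsigned char header[ADTS_HEAD_LEN];
    MakeAdtsHeader(header, 44100, 2, aacLen + ADTS_HEAD_LEN); // length covers header + payload
    fwrite(header, 1, ADTS_HEAD_LEN, fp);
    fwrite(aacData, 1, aacLen, fp);
}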
FILE *ptest = nullptr;
int once = 1;
int AACAudioCoder::Encode( unsigned char *input, unsigned int num) {
mFrame->nb_samples = mCodecCtx->frame_size;
mFrame->format = mCodecCtx->sample_fmt;
avcodec_fill_audio_frame(mFrame, mCodecCtx->channels,
mCodecCtx->sample_fmt, input,
num, 1);
int aac_out_len = 0;
unsigned char*aac_buf;
if (nullptr == input) {
return -1;
}
if (nullptr == ptest) {
ptest = fopen("dst.aac", "wb");
}
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
int got_output = 0;
mFrame->pts = mPts += 23;
int ret = avcodec_encode_audio2(mCodecCtx, &pkt, mFrame, &got_output);
if (ret < 0) {
printf("Error encoding frame\n");
return -1;
}
if (got_output) {
if (nullptr != mObserver) {
mObserver->OnAudioEncode(pkt.data, pkt.size, mFrame->pts);
}
fwrite(pkt.data, 1, pkt.size, ptest);
av_free_packet(&pkt);
}
return 0;
}
int AAC_CODER::AACAudioCoder::SetObserver(EncodeAudioObserver *p)
{
if (nullptr == this->mObserver) {
this->mObserver = p;
}
return 0;
}


@ -0,0 +1,53 @@
#pragma once
#include "AudioCapture.h"
#ifdef __cplusplus
extern "C"
{
#include "falabaac/fa_aacapi.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
};
#endif
namespace AAC_CODER {
class AACAudioCoder :public CaptureAudio::CaptureAudioObserver {
public:
class EncodeAudioObserver {
public:
virtual void OnAudioEncode(const void *frameaddress, uint32_t framelen,uint16_t pts) {};
};
enum CAP_STATUS {
RUNNING = 1,
STOP = 2,
PAUSE = 3,
READY = 4,
UNREADY = 5,
FAIL = 6,
};
void OnAudioData(const void *frameaddress, uint32_t framelen);
AACAudioCoder(unsigned int smprate, unsigned int channel);
~AACAudioCoder();
int Encode(unsigned char *input, unsigned int num);
int SetObserver(EncodeAudioObserver *);
private:
unsigned int mpts;
CAP_STATUS mStatus;
unsigned long mSampleRate = 44100;
unsigned int mChannels = 2;
unsigned int mPCMBitSize = 16;
uint8_t* mAACBuffer;
unsigned long nMaxOutputBytes;
uintptr_t mFablaAacenc;
AVCodec *mCodec;
AVCodec *pCodec;
AVCodecContext *mCodecCtx = NULL;
AVFrame *mFrame;
AVPacket pkt;
uint8_t* mFrameBuf;
uint16_t mPts;
EncodeAudioObserver *mObserver;
};
}


@ -0,0 +1,138 @@
#include "AACDecoder.h"
#include "Debuger.h"
void AACDecoder::OnRtmpFrame(void * dat, uint32_t size)
{
this->Decode((uint8_t *)dat, size);
}
AACDecoder::AACDecoder() :mObserver(nullptr)
{
mStatus = RUNNING;
this->mObserverType = Observer_Audio;
mCodec = avcodec_find_decoder(AV_CODEC_ID_AAC);
if (mCodec == NULL) {
Debuger::Debug(L"find codec fail\r\n");
mStatus = FAIL;
}
mCodecCtx = avcodec_alloc_context3(mCodec);
if (nullptr == mCodecCtx) {
Debuger::Debug(L"find codec ctx fail\r\n");
mStatus = FAIL;
}
mCodecCtx->codec = mCodec;
mCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
mCodecCtx->sample_rate = 44100;
mCodecCtx->channels = 2;
mCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
mCodecCtx->sample_fmt = AV_SAMPLE_FMT_FLTP;
mCodecCtx->frame_size = 2048;
#if LIBSWRESAMPLE_VERSION_MINOR >= 17 // pick the appropriate resampler setup API for this library version
mSwrCtx = swr_alloc();
av_opt_set_int(mSwrCtx, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
av_opt_set_int(mSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
av_opt_set_int(mSwrCtx, "in_sample_rate", 44100, 0);
av_opt_set_int(mSwrCtx, "out_sample_rate", 44100, 0);
av_opt_set_sample_fmt(mSwrCtx, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
av_opt_set_sample_fmt(mSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
swr_init(mSwrCtx);
#else
mSwrCtx = swr_alloc();
mSwrCtx = swr_alloc_set_opts(mSwrCtx,
AV_CH_LAYOUT_STEREO, //output
AV_SAMPLE_FMT_S16,
44100,
AV_CH_LAYOUT_STEREO, // input
AV_SAMPLE_FMT_FLTP,
44100,
0, NULL);
swr_init(mSwrCtx);
#endif
if (avcodec_open2(mCodecCtx, mCodec, NULL) < 0) {
Debuger::Debug(L"can't open codec\r\n");
mStatus = FAIL;
}
mSampleRate = 44100;
mChannel = 2;
mChannelLayout = AV_CH_LAYOUT_STEREO;
mSampleFmt = AV_SAMPLE_FMT_FLTP;
mStatus = RUNNING;
mU16Data = (uint8_t *)av_malloc(192000);
}
#define MAX_AUDIO_FRAME_SIZE 192000
AACDecoder::AACDecoder(AVStream * p):mObserver(nullptr)
{
mStatus = RUNNING;
this->mObserverType = Observer_Audio;
if (nullptr == p) {
Debuger::Debug(L"find codec fail\r\n");
mStatus = FAIL;
}
mCodecCtx = p->codec;
mCodec = avcodec_find_decoder(mCodecCtx->codec_id);
if (mCodec == NULL) {
Debuger::Debug(L"find codec fail\r\n");
mStatus = FAIL;
}
if (avcodec_open2(mCodecCtx, mCodec, NULL) < 0) {
Debuger::Debug(L"can't open codec\r\n");
mStatus = FAIL;
}
mSampleRate = mCodecCtx->sample_rate;
mChannel = mCodecCtx->channels;
mChannelLayout = mCodecCtx->channel_layout;
mSampleFmt = mCodecCtx->sample_fmt;
}
int AACDecoder::Decode(uint8_t * dat, uint16_t size)
{
AVPacket pkt;
int got_pcm = 0;
int len = 0;
if (mStatus == RUNNING) {
mPcmDat = av_frame_alloc();
av_init_packet(&pkt);
char* data = (char*)dat;
pkt.data = (uint8_t *)data;
pkt.size = size;
len = avcodec_decode_audio4(this->mCodecCtx, mPcmDat, &got_pcm, &pkt);
if (len < 0) {
printf("Error while decoding a frame.\n");
return -1;
}
if (got_pcm == 0) {
return 0;
}
int buffer_size = av_samples_get_buffer_size(NULL, 2,
mPcmDat->nb_samples,
AV_SAMPLE_FMT_S16, 1); // 2 channels, not the AV_CH_LAYOUT_STEREO layout mask
// swr_convert takes the output capacity in samples per channel, not in bytes
swr_convert(mSwrCtx, &mU16Data, mPcmDat->nb_samples, (const uint8_t **)mPcmDat->data,
mPcmDat->nb_samples);
//Debuger::Debug(L"get %d audio samples\r\n", mPcmDat->nb_samples);
if (nullptr != this->mObserver) {
int out_buffer_size = av_samples_get_buffer_size(NULL, 2, mPcmDat->nb_samples,
AV_SAMPLE_FMT_FLTP, 1);
this->mObserver->OnAudioDecode(mU16Data, buffer_size);
}
//(const uint8_t **)mPcmDat->data, mPcmDat->nb_samples;
av_frame_free(&mPcmDat);
return 0;
}
return -1;
}
int AACDecoder::SetObserver(AACDecoderObserver *p)
{
if(nullptr != p)
this->mObserver = p;
return 0;
}
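// Illustrative wiring sketch (an assumption, shown as a comment because AudioPlayer.h is
// not included in this file): the RTMP puller feeds AAC frames to this decoder, which in
// turn hands 16-bit stereo PCM to an AACDecoderObserver such as AudioPlayer.
//
//   RtmpPuller2 puller;
//   AACDecoder decoder;
//   AudioPlayer player(0);                // output device index, assumed
//   decoder.SetObserver(&player);         // PCM -> speaker
//   puller.SetObserver(&decoder);         // AAC -> decoder
//   puller.ConnectServer("rtmp://example.com/live/stream");  // placeholder URL
//   puller.StartPull();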


@ -0,0 +1,49 @@
#pragma once
#include "RtmpPuller2.h"
extern "C"
{
#include "falabaac/fa_aacapi.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libswresample\swresample.h"
};
class AACDecoder :public RtmpPuller2::RtmpPullObserver {
public:
enum DECODE_STATUS {
RUNNING = 1,
STOP = 2,
PAUSE = 3,
FAIL = 4,
NOSOURCE = 6,
};
class AACDecoderObserver {
public:
virtual int OnAudioDecode(uint8_t *dat, uint16_t size) { return 0; };
};
void OnRtmpFrame(void * dat, uint32_t size);
AACDecoder();
AACDecoder(AVStream *p);
int Decode(uint8_t *dat,uint16_t);
int SetObserver(AACDecoderObserver *);
private:
AVFormatContext *mFormatCtx = nullptr;
AVCodecContext *mCodecCtx = nullptr;
AVCodec *mCodec = nullptr;
AVPacket *mPacket = nullptr;
uint16_t mSampleCnt;
enum AVSampleFormat mSampleFmt;
uint16_t mSampleRate;
uint16_t mChannel;
uint64_t mChannelLayout;
AVFrame *mPcmDat;
uint8_t *mU16Data;
AACDecoderObserver *mObserver;
DECODE_STATUS mStatus;
SwrContext * mSwrCtx;
};


@ -0,0 +1,126 @@
#include "AudioCapture.h"
#include "Debuger.h"
PaStream *gStreamOut = nullptr;
CaptureAudio::CaptureAudio(uint16_t rate, uint8_t channel) {
this->mChanel = channel;
this->mSampleRate = rate;
this->mSize = 0;
this->mStatus = FAIL;
this->observer = nullptr;
}
int CaptureAudio::SetObserver(CaptureAudioObserver* ob) {
if (nullptr == ob) return -1;
this->observer = ob;
return 0;
}
int paOutStreamBkss(const void* input, void* output, unsigned long frameCount,
const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, void * userData)
{
CaptureAudio *pCap = nullptr;
Debuger::Debug(L"%d\r\n", frameCount);
if (userData != nullptr) {
pCap = (CaptureAudio *)userData;
pCap->OnCallBack(input,output,frameCount);
pCap->AddCnt(4 * frameCount); // 2 channels * 2 bytes per 16-bit sample
}
return 0;
}
int CaptureAudio::OnCallBack(const void* input, void* output, unsigned long frameCount) {
if(nullptr != this->observer)
this->observer->OnAudioData(input, frameCount);
return 0;
}
CaptureAudio::~CaptureAudio() {
if(mInStream != nullptr)
Pa_CloseStream(mInStream);
}
int CaptureAudio::StartCapture()
{
PaError err = paNoError;
if (this->mStatus == RUNNING) {
err = Pa_StartStream(mInStream);
if (err != paNoError) {
this->mStatus = FAIL;
}
}
else
return -1;
return 0;
}
vector<CaptureAudio::MICInfo> CaptureAudio::EnumSpeakers()
{
vector<CaptureAudio::MICInfo> ret;
PaError err = Pa_Initialize();
if (err != paNoError) {
Debuger::Debug(L"init stream error\r\n");
mStatus = FAIL;
}
// get the number of devices
PaDeviceIndex iNumDevices = Pa_GetDeviceCount();
if (iNumDevices <= 0)
{
return ret;
}
for (int i = 0; i < iNumDevices; i++)
{
MICInfo ins;
ins.index = i;
const PaDeviceInfo *deviceInfo = Pa_GetDeviceInfo(i);
if (nullptr != deviceInfo)
if (deviceInfo->maxInputChannels > 0) {
ins.name = deviceInfo->name;
ret.push_back(ins);
}
}
return ret;
}
int CaptureAudio::InitCapture(int index,uint16_t rate, uint8_t channel) {
PaStreamParameters intputParameters;
PaError err = paNoError;
err = Pa_Initialize();
if (err != paNoError) goto error;
if (index < 0)
{
index = Pa_GetDefaultInputDevice();
}
if (paNoDevice == index) {
mStatus = FAIL;
return -1;
}
intputParameters.device = index;
intputParameters.channelCount = 2;
intputParameters.sampleFormat = paInt16;
intputParameters.suggestedLatency = Pa_GetDeviceInfo(intputParameters.device)->defaultLowInputLatency;
intputParameters.hostApiSpecificStreamInfo = NULL;
err = Pa_OpenStream(&mInStream, &intputParameters, NULL, 44100, 1024,
paFramesPerBufferUnspecified, paOutStreamBkss, this);
if (err != paNoError) {
this->mStatus = FAIL;
return -1;
}
this->mStatus = RUNNING;
return 0;
error:
Pa_Terminate();
return -1;
}
void CaptureAudio::StopCapture()
{
if (this->mStatus == RUNNING) {
Pa_StopStream(mInStream);
this->mStatus = STOP;
}
}
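// Illustrative usage sketch (an assumption, shown as a comment because the encoder header
// is not included in this file): enumerate input devices, open the default microphone and
// feed the captured 16-bit stereo frames to an observer such as AACAudioCoder.
//
//   CaptureAudio cap(44100, 2);
//   vector<CaptureAudio::MICInfo> mics = cap.EnumSpeakers(); // despite the name, lists capture devices
//   AAC_CODER::AACAudioCoder coder(44100, 2);
//   cap.SetObserver(&coder);
//   if (cap.InitCapture(-1, 44100, 2) == 0)                  // -1 selects the default device
//       cap.StartCapture();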


@ -0,0 +1,49 @@
#ifndef __CAPTUREAUDIO_H__
#define __CAPTUREAUDIO_H__
#include "stdint.h"
#include "../third/portaudio/portaudio.h"
#include <vector>
#include <string>
using namespace std;
typedef int (CbAudio)(const void* input, void* output, unsigned long frameCount,
const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, void * userData);
class CaptureAudio {
public:
class CaptureAudioObserver {
public:
virtual void OnAudioData(const void *frameaddress, uint32_t framelen) {};
};
typedef struct _T_MicInfo
{
string name;
int index;
}MICInfo;
enum CAP_STATUS {
RUNNING = 1,
STOP = 2,
PAUSE = 3,
FAIL = 4,
};
vector<CaptureAudio::MICInfo> EnumSpeakers();
CaptureAudio(uint16_t rate, uint8_t channel);
~CaptureAudio();
int StartCapture();
int InitCapture(int index,uint16_t rate,uint8_t channel);
void StopCapture();
int SetObserver(CaptureAudioObserver*);
int OnCallBack(const void* input, void* output, unsigned long frameCount);
void AddCnt(unsigned int x) {this->mSize += x;};
private:
uint16_t mSampleRate; // sample rate
uint16_t mChanel; // channel count
PaStream *mInStream;
PaStream *mOutStream;
unsigned long mSize;
CAP_STATUS mStatus;
CaptureAudioObserver *observer;
};
#endif //__CAPTUREAUDIO_H__


@ -0,0 +1,91 @@
#include "AudioPlayer.h"
#include "Debuger.h"
#include "utils.h"
AudioPlayer::AudioPlayer(int index)
{
mStatus = RUNNING;
PaError err = Pa_Initialize();
if (err != paNoError) {
Debuger::Debug(L"init stream error\r\n");
mStatus = FAIL;
}
// get the number of devices
PaDeviceIndex iNumDevices = Pa_GetDeviceCount();
if (iNumDevices < 0)
{
}
for (int i = 0; i < iNumDevices; i++)
{
const PaDeviceInfo *deviceInfo = Pa_GetDeviceInfo(i);
Debuger::Debug(L"index %d %d %d \r\n",i,
deviceInfo->maxInputChannels, deviceInfo->maxOutputChannels); // print each device's channel counts
}
mOutputParameters.device = index;
mOutputParameters.channelCount = 2; // stereo output, left channel first
mOutputParameters.sampleFormat = paInt16;
mOutputParameters.suggestedLatency = Pa_GetDeviceInfo(mOutputParameters.device)->defaultLowOutputLatency;
mOutputParameters.hostApiSpecificStreamInfo = NULL;
err = Pa_OpenStream(&mOutStream, NULL, &mOutputParameters, 44100, 1024,
paFramesPerBufferUnspecified, NULL, NULL);
if (err != paNoError) {
Debuger::Debug(L"open output stream error\r\n");
mStatus = FAIL;
goto end;
}
err = Pa_StartStream(mOutStream);
if (err != paNoError) {
Debuger::Debug(L"start stream error\r\n");
mStatus = FAIL;
}
end:
return;
}
vector<AudioPlayer::SpeakerInfo> AudioPlayer::EnumSpeakers()
{
vector<AudioPlayer::SpeakerInfo> ret;
PaError err = Pa_Initialize();
if (err != paNoError) {
Debuger::Debug(L"init stream error\r\n");
mStatus = FAIL;
}
// get the number of devices
PaDeviceIndex iNumDevices = Pa_GetDeviceCount();
if (iNumDevices <= 0)
{
return ret;
}
for (int i = 0; i < iNumDevices; i++)
{
SpeakerInfo ins;
ins.index = i;
const PaDeviceInfo *deviceInfo = Pa_GetDeviceInfo(i);
if(nullptr != deviceInfo)
if (deviceInfo->maxOutputChannels > 0) {
ins.name = char2wchar(deviceInfo->name);
ret.push_back(ins);
}
}
return ret;
}
int AudioPlayer::Play(uint8_t * data, uint16_t num)
{
PaError err;
if (mStatus == RUNNING) {
err = Pa_WriteStream(mOutStream, data, num);
if (paNoError != err) {
return -1;
}
}
else {
return -1;
}
return 0;
}
int AudioPlayer::OnAudioDecode(uint8_t * dat, uint16_t size)
{
// one decoded AAC frame is 1024 samples per channel; Play() takes a frame count
return this->Play(dat, 1024);
}


@ -0,0 +1,31 @@
#pragma once
#include "stdint.h"
#include "../third/portaudio/portaudio.h"
#include "AACDecoder.h"
class AudioPlayer :public AACDecoder::AACDecoderObserver{
public:
class AudioPlayerObserver{
public:
virtual int OnAudioPlay();
};
typedef struct {
int index;
wstring name;
}SpeakerInfo;
enum PLAY_STATUS {
RUNNING = 1,
STOP = 2,
PAUSE = 3,
FAIL = 4,
};
AudioPlayer(int index);
vector<SpeakerInfo> EnumSpeakers();
int Play(uint8_t *data,uint16_t num);
int OnAudioDecode(uint8_t *dat, uint16_t size);
private:
PLAY_STATUS mStatus;
PaStreamParameters mOutputParameters;
PaStream *mOutStream;
};


@ -0,0 +1,422 @@
#include "CameraCapture.h"
#include "Debuger.h"
#pragma comment(lib, "strmiids")
//define release maco
#define ReleaseInterface(x) \
if ( NULL != x ) \
{ \
x->Release( ); \
x = NULL; \
}
// Application-defined message to notify app of filter graph events
#define WM_GRAPHNOTIFY WM_APP+100
Camera::Camera():
mInitOK(false),
mVideoHeight(0),
mVideoWidth(0),
mDevFilter(NULL),
mCaptureGB(NULL),
mGraphBuilder(NULL),
mMediaControl(NULL),
mMediaEvent(NULL),
mSampGrabber(NULL),
mIsVideoOpened(false)
{
//HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
//if (SUCCEEDED(hr))
{
mInitOK = true;
}
mStatus = STOP;
}
Camera::~Camera()
{
Close();
CoUninitialize();
}
HRESULT Camera::InitializeEnv()
{
HRESULT hr;
//Create the filter graph
hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
IID_IGraphBuilder, (LPVOID*)&mGraphBuilder);
if (FAILED(hr))
return hr;
//Create the capture graph builder
hr = CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC_SERVER,
IID_ICaptureGraphBuilder2, (LPVOID*)&mCaptureGB);
if (FAILED(hr))
return hr;
//Obtain interfaces for media control and Video Window
hr = mGraphBuilder->QueryInterface(IID_IMediaControl, (LPVOID*)&mMediaControl);
if (FAILED(hr))
return hr;
hr = mGraphBuilder->QueryInterface(IID_IMediaEventEx, (LPVOID*)&mMediaEvent);
if (FAILED(hr))
return hr;
hr = mCaptureGB->SetFiltergraph(mGraphBuilder);
if (FAILED(hr))
return hr;
return hr;
}
std::vector<std::wstring> Camera::EnumAllCamera(void)
{
if (mInitOK == false)
return std::vector<std::wstring>();
std::vector<std::wstring> names;
IEnumMoniker *pEnum = nullptr;
// Create the System Device Enumerator.
ICreateDevEnum *pDevEnum;
HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL,
CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pDevEnum));
if (SUCCEEDED(hr))
{
// Create an enumerator for the category.
hr = pDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pEnum, 0);
if (hr == S_FALSE)
{
hr = VFW_E_NOT_FOUND; // The category is empty. Treat as an error.
}
pDevEnum->Release();
}
if (!SUCCEEDED(hr))
return std::vector<std::wstring>();
IMoniker *pMoniker = NULL;
while (pEnum->Next(1, &pMoniker, NULL) == S_OK)
{
IPropertyBag *pPropBag;
IBindCtx* bindCtx = NULL;
LPOLESTR str = NULL;
VARIANT var;
VariantInit(&var);
HRESULT hr = pMoniker->BindToStorage(0, 0, IID_PPV_ARGS(&pPropBag));
if (FAILED(hr))
{
pMoniker->Release();
continue;
}
// Get description or friendly name.
hr = pPropBag->Read(L"Description", &var, 0);
if (FAILED(hr))
{
hr = pPropBag->Read(L"FriendlyName", &var, 0);
}
if (SUCCEEDED(hr))
{
names.push_back(var.bstrVal);
VariantClear(&var);
}
pPropBag->Release();
pMoniker->Release();
}
pEnum->Release();
return names;
}
HRESULT Camera::BindFilter(int deviceID, IBaseFilter **pBaseFilter)
{
ICreateDevEnum *pDevEnum;
IEnumMoniker *pEnumMon;
IMoniker *pMoniker;
HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER,
IID_ICreateDevEnum, (LPVOID*)&pDevEnum);
if (SUCCEEDED(hr))
{
hr = pDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pEnumMon, 0);
if (hr == S_FALSE)
{
hr = VFW_E_NOT_FOUND;
return hr;
}
pEnumMon->Reset();
ULONG cFetched;
int index = 0;
hr = pEnumMon->Next(1, &pMoniker, &cFetched);
while (hr == S_OK && index <= deviceID)
{
IPropertyBag *pProBag;
hr = pMoniker->BindToStorage(0, 0, IID_IPropertyBag, (LPVOID*)&pProBag);
if (SUCCEEDED(hr))
{
if (index == deviceID)
{
pMoniker->BindToObject(0, 0, IID_IBaseFilter, (LPVOID*)pBaseFilter);
}
}
pMoniker->Release();
index++;
hr = pEnumMon->Next(1, &pMoniker, &cFetched);
}
pEnumMon->Release();
}
return hr;
}
int Camera::SetObserver(CameraObserver *p)
{
return this->mSampleGrabberCB.SetObserver(p);
}
int Camera::RemoveObserver(CameraObserver * p)
{
return this->mSampleGrabberCB.RemoveObserver(p);
}
int Camera::SampleGrabberCallback::SetObserver(CameraObserver *p)
{
if (nullptr == p)
return -1;
mMux.lock();
for (auto itr = this->mObserver.begin(); itr != mObserver.end(); itr++) {
if (p == *itr) return 0;
}
this->mObserver.push_back(p);
mMux.unlock();
return 0;
}
int Camera::SampleGrabberCallback::RemoveObserver(CameraObserver * p)
{
mMux.lock();
bool founded = false;
auto itrDel = this->mObserver.begin();
for (auto itr = this->mObserver.begin(); itr != mObserver.end(); itr++) {
if (p == *itr) {
itrDel = itr;
founded = true;
}
}
if (founded)
mObserver.erase(itrDel);
mMux.unlock();
return 0;
}
bool Camera::Open(std::wstring &camera_name)
{
if (mInitOK == false)
return false;
if (mIsVideoOpened)
return true;
HRESULT hr;
#define CHECK_HR(x) do{ hr = (x); if (FAILED(hr)){ Close(); return false;}}while(0)
CHECK_HR(InitializeEnv());
IBaseFilter *pSampleGrabberFilter , *dest_filter;
std::vector<std::wstring> names = EnumAllCamera();
if (names.empty())
{
Close(); return false;
}
auto name_it = find(names.begin(), names.end(), camera_name);
if (name_it == names.end())
{
Close(); return false;
}
int deviceID = static_cast<int>(distance(names.begin(), name_it));
// create grabber filter instance
CHECK_HR(CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
IID_IBaseFilter, (LPVOID*)&pSampleGrabberFilter));
// bind source device
CHECK_HR(BindFilter(deviceID, &mDevFilter));
// add src filter
CHECK_HR(mGraphBuilder->AddFilter(mDevFilter, L"Video Filter"));
// add grabber filter and query interface
CHECK_HR(mGraphBuilder->AddFilter(pSampleGrabberFilter, L"Sample Grabber"));
CHECK_HR(pSampleGrabberFilter->QueryInterface(IID_ISampleGrabber, (LPVOID*)&mSampGrabber));
// find the current bit depth
HDC hdc = GetDC(NULL);
mBitDepth = GetDeviceCaps(hdc, BITSPIXEL);
ReleaseDC(NULL, hdc);
// set the media type for grabber filter
AM_MEDIA_TYPE mediaType;
ZeroMemory(&mediaType, sizeof(AM_MEDIA_TYPE));
mediaType.majortype = MEDIATYPE_Video;
switch (mBitDepth)
{
case 8:
mediaType.subtype = MEDIASUBTYPE_RGB8;
break;
case 16:
mediaType.subtype = MEDIASUBTYPE_RGB555;
break;
case 24:
mediaType.subtype = MEDIASUBTYPE_RGB24;
break;
case 32:
mediaType.subtype = MEDIASUBTYPE_RGB32;
break;
default:
Close();
return false;
}
mediaType.formattype = FORMAT_VideoInfo;
hr = mSampGrabber->SetMediaType(&mediaType);
CHECK_HR(CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter, (void**)(&dest_filter)));
mGraphBuilder->AddFilter(dest_filter, L"NullRenderer");
// connect source filter to grabber filter
CHECK_HR(mCaptureGB->RenderStream(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video,
mDevFilter, pSampleGrabberFilter, dest_filter));
// get connected media type
CHECK_HR(mSampGrabber->GetConnectedMediaType(&mediaType));
VIDEOINFOHEADER * vih = (VIDEOINFOHEADER*)mediaType.pbFormat;
mVideoWidth = vih->bmiHeader.biWidth;
mVideoHeight = vih->bmiHeader.biHeight;
mPixFmt = mediaType.subtype;
Debuger::Debug(L"guid media type is %x %x %x %x width is %d ,height is %d\r\n", mediaType.subtype.Data1,
mediaType.subtype.Data2,
mediaType.subtype.Data3,
mediaType.subtype.Data4,
mVideoWidth,mVideoHeight);
// configure grabber filter
CHECK_HR(mSampGrabber->SetOneShot(0));
CHECK_HR(mSampGrabber->SetBufferSamples(0));
// Use the BufferCB callback method
CHECK_HR(mSampGrabber->SetCallback(&mSampleGrabberCB, 1));
mSampleGrabberCB.mNewDataCallBack = mFrameCallBack;
mMediaControl->Run();
dest_filter->Release();
pSampleGrabberFilter->Release();
// release resource
if (mediaType.cbFormat != 0)
{
CoTaskMemFree((PVOID)mediaType.pbFormat);
mediaType.cbFormat = 0;
mediaType.pbFormat = NULL;
}
if (mediaType.pUnk != NULL)
{
mediaType.pUnk->Release();
mediaType.pUnk = NULL;
}
mIsVideoOpened = TRUE;
mStatus = RUNNING;
return true;
}
bool Camera::Close()
{
if (mMediaControl)
{
mMediaControl->Stop();
}
if (mMediaEvent)
{
mMediaEvent->SetNotifyWindow(NULL, WM_GRAPHNOTIFY, 0);
}
mIsVideoOpened = false;
//release interface
ReleaseInterface(mDevFilter);
ReleaseInterface(mCaptureGB);
ReleaseInterface(mGraphBuilder);
ReleaseInterface(mMediaControl);
ReleaseInterface(mMediaEvent);
ReleaseInterface(mSampGrabber);
return true;
}
void Camera::SetCallBack(std::function<void(double, BYTE*, LONG)> f)
{
mFrameCallBack = f;
}
ULONG STDMETHODCALLTYPE Camera::SampleGrabberCallback::AddRef()
{
return 1;
}
ULONG STDMETHODCALLTYPE Camera::SampleGrabberCallback::Release()
{
return 2;
}
HRESULT STDMETHODCALLTYPE Camera::SampleGrabberCallback::QueryInterface(REFIID riid, void** ppvObject)
{
if (NULL == ppvObject) return E_POINTER;
if (riid == __uuidof(IUnknown))
{
*ppvObject = static_cast<IUnknown*>(this);
return S_OK;
}
if (riid == IID_ISampleGrabberCB)
{
*ppvObject = static_cast<ISampleGrabberCB*>(this);
return S_OK;
}
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE Camera::SampleGrabberCallback::SampleCB(double Time, IMediaSample *pSample)
{
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE Camera::SampleGrabberCallback::BufferCB(double Time, BYTE * pBuffer, long BufferLen)
{
//Debuger::Debug(L"receieve %d \r\n", BufferLen);
if (mNewDataCallBack)
{
mNewDataCallBack(Time, pBuffer, BufferLen);
}
if (mObserver.size() > 0) {
mMux.lock();
for (auto itr = this->mObserver.begin(); itr != mObserver.end(); itr++) {
CameraObserver *p = (CameraObserver *)*itr;
p->OnCameraData(pBuffer, BufferLen);
}
mMux.unlock();
}
return S_OK;
}
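// Illustrative usage sketch (an assumption): open the first enumerated camera and receive
// raw frames through the BufferCB callback registered via SetCallBack().
//
//   std::vector<std::wstring> cams = Camera::GetInstance()->EnumAllCamera();
//   if (!cams.empty()) {
//       Camera::GetInstance()->SetCallBack(
//           [](double t, BYTE *buf, LONG len) { /* hand the frame to a renderer/encoder */ });
//       Camera::GetInstance()->Open(cams[0]);
//   }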


@ -0,0 +1,88 @@
#pragma once
#include <vector>
#include <functional>
#include <dshow.h>
#include <windows.h>
#include "qedit.h"
#include <mutex>
#include <vector>
using namespace std;
class Camera
{
public:
enum CAP_STATUS {
RUNNING = 1,
STOP = 2,
PAUSE = 3,
FAIL = 4,
};
class CameraObserver {
public:
virtual int OnCameraData(uint8_t *dat, uint32_t size) { return 0; };
};
private:
Camera();
Camera(const Camera &) = delete;
Camera& operator =(const Camera&) = delete;
~Camera();
bool mInitOK;
bool mIsVideoOpened;
int mVideoWidth, mVideoHeight, mBitDepth;
std::function<void(double, BYTE *, LONG)> mFrameCallBack;
private:
class SampleGrabberCallback : public ISampleGrabberCB
{
public:
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void** ppvObject);
HRESULT STDMETHODCALLTYPE SampleCB(double Time, IMediaSample *pSample);
HRESULT STDMETHODCALLTYPE BufferCB(double Time, BYTE *pBuffer, long BufferLen);
std::function<void(double, BYTE *, LONG)> mNewDataCallBack;
mutex mMux;
int SetObserver(CameraObserver *);
int RemoveObserver(CameraObserver *p);
private:
vector<CameraObserver*> mObserver;
};
IGraphBuilder *mGraphBuilder;
ICaptureGraphBuilder2 *mCaptureGB;
IMediaControl *mMediaControl;
IBaseFilter *mDevFilter;
ISampleGrabber *mSampGrabber;
IMediaEventEx *mMediaEvent;
SampleGrabberCallback mSampleGrabberCB;
HRESULT InitializeEnv();
HRESULT BindFilter(int deviceID, IBaseFilter **pBaseFilter);
public:
int SetObserver(CameraObserver *);
int RemoveObserver(CameraObserver *p);
CAP_STATUS mStatus;
static Camera *GetInstance(void)
{
static Camera *instance = nullptr;
if (nullptr == instance)
instance = new Camera();
return instance;
}
std::vector<std::wstring> EnumAllCamera(void);
GUID mPixFmt;
bool Open(std::wstring &camera_name);
bool Close(void);
/*!
* @param time : Starting time of the sample, in seconds.
* @param buff : Pointer to a buffer that contains the sample data.
* @param len : Length of the buffer pointed to by pBuffer, in bytes.
*/
void SetCallBack(std::function<void(double time, BYTE *buff, LONG len)>);
int GetHeight() { return mVideoHeight; }
int GetWidth() { return mVideoWidth; }
int GetBitDepth() { return mBitDepth; }
};


@ -0,0 +1,16 @@
#pragma once
#include <string>
using namespace std;
class Debuger
{
public:
Debuger();
~Debuger();
static int Debug(wstring log);
static int Debug(const wchar_t *format, ...);
static int Debug(string log);
};


@ -0,0 +1,100 @@
#include "H264Docoder.h"
#include "Debuger.h"
extern "C" {
#include "libswscale/swscale.h"
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavutil/pixfmt.h"
}
H264decoder::H264decoder()
:mObserver(nullptr){
this->mObserverType = Observer_Video;
avcodec_register_all();
mCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!mCodec) {
cout << "could not found 264 decoder" << endl;
exit(1);
}
mCtx = avcodec_alloc_context3(mCodec);
picture = av_frame_alloc();
if ((mCodec->capabilities)&AV_CODEC_CAP_TRUNCATED)
(mCtx->flags) |= AV_CODEC_FLAG2_CHUNKS;
mCtx->height = 720;
mCtx->width = 1280;
if (avcodec_open2(mCtx, mCodec, NULL) < 0) {
cout << "could not open codec\n";
exit(1);
}
}
H264decoder::~H264decoder()
{
}
//
// Created by 29019 on 2019/5/7.
//
const int width = 640;
const int height = 480;
const int framesize = width * height * 3 / 2; // number of bytes in one YUV420P frame
VData *H264decoder::Decodes(void *dat,uint32_t size) {
//FILE *pOut = fopen("pic.yuv","wb+");
AVPacket pkt;
int got_picture = 0;
int len = 0;
picture = av_frame_alloc();
av_init_packet(&pkt);
char* data = (char*)dat;
pkt.data = (uint8_t *)data;
pkt.size = size;
len = avcodec_decode_video2(this->mCtx, picture, &got_picture, &pkt);
if (len < 0) {
printf("Error while decoding a frame.\n");
return nullptr;
}
if (got_picture == 0) {
return nullptr;
}
++frame;
AVPixelFormat pix;
int pic_size = avpicture_get_size(AV_PIX_FMT_YUVJ420P, picture->width, picture->height);
/*
cout << "receive width " << picture->width << " height "
<< picture->height<<"pic size "
<< pic_size <<" channel layout "
<< picture->linesize[0]<< endl;*/
uint32_t pitchY = picture->linesize[0];
uint32_t pitchU = picture->linesize[1];
uint32_t pitchV = picture->linesize[2];
uint8_t *avY = picture->data[0];
uint8_t *avU = picture->data[1];
uint8_t *avV = picture->data[2];
if (nullptr != mObserver) {
this->mObserver->OnRecieveData(picture);
}
av_frame_free(&picture);
return nullptr;
}
void H264decoder::OnRtmpFrame(void * dat, uint32_t size)
{
//Debuger::Debug(L"get data\r\n");
this->Decodes(dat, size);
}
int H264decoder::SetObserver(H264DecodeObserver *p)
{
if (nullptr != p)
this->mObserver = p;
else
return -1;
return 0;
}


@ -0,0 +1,58 @@
#pragma once
#include <ctype.h>
#include <vector>
#include <list>
#include <iostream>
#include <time.h>
#include "RtmpPuller.h"
#include "RtmpPuller2.h"
using namespace std;
extern "C" {
#include "libavutil/pixfmt.h"
#include "libavcodec/avcodec.h"
#include "sdl/SDL.h"
}
#define INBUF_SIZE 4096
typedef vector<char> VData;
class Decoder {
private:
list<VData> mDecodeData;
public:
virtual int Decode(VData &dat) { return -1; };
};
typedef class H264decoder :public Decoder, public RtmpPuller2::RtmpPullObserver {
public:
class H264DecodeObserver {
public:
virtual int OnRecieveData(AVFrame *frame) { return 0; };
};
enum CAP_STATUS {
RUNNING = 1,
STOP = 2,
PAUSE = 3,
FAIL = 4,
};
H264decoder();
~H264decoder();
VData *Decodes(void *dat, uint32_t len);
void OnRtmpFrame(void * dat, uint32_t size);
int SetObserver(H264DecodeObserver *);
private:
AVCodec *mCodec;
AVCodecContext *mCtx = NULL;
int frame, got_picture, len;
AVFrame *picture;
AVPacket avpkt;
H264DecodeObserver *mObserver;
//SDL---------------------------
int screen_w = 0, screen_h = 0;
SDL_Window *screen;
SDL_Renderer* sdlRenderer;
SDL_Texture* sdlTexture;
SDL_Rect sdlRect;
}CH264Decoder;


@ -0,0 +1,36 @@
#pragma once
#include "ImageUtil.h"
bool GuidCompare(GUID g1, GUID g2) {
if (g1.Data1 != g2.Data1) {
return false;
}
if (g1.Data2 != g2.Data2) {
return false;
}
if (g1.Data3 != g2.Data3) {
return false;
}
return true;
}
AVPixelFormat GetFormatFromGuid(GUID g)
{
if (GuidCompare(g, MEDIASUBTYPE_YUY2)) {
return AV_PIX_FMT_YUYV422;
}
if (GuidCompare(g, MEDIASUBTYPE_RGB24)) {
return AV_PIX_FMT_RGB24;
}
if (GuidCompare(g, MEDIASUBTYPE_RGB32)) {
return AV_PIX_FMT_RGB32;
}
if (GuidCompare(g, MEDIASUBTYPE_MJPG)) {
return AV_PIX_FMT_YUVJ420P;
}
if (GuidCompare(g, MEDIASUBTYPE_IYUV)) {
return AV_PIX_FMT_YUYV422;
}
return AV_PIX_FMT_NONE;
}


@ -0,0 +1,12 @@
#pragma once
#include "guiddef.h"
#include "uuids.h"
extern "C" {
#include "libswscale/swscale.h"
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavutil/pixfmt.h"
}
AVPixelFormat GetFormatFromGuid(GUID g);


@ -0,0 +1,63 @@
#pragma once
//Windows
#include <string>
#include <thread>
#include <vector>
#include <mutex>
using namespace std;
extern "C"
{
#include "libavformat/avformat.h"
#include "libavutil/mathematics.h"
#include "libavutil/time.h"
};
#pragma comment (lib, "ws2_32.lib")
#pragma comment (lib, "Secur32.lib")
#pragma comment (lib, "Bcrypt.lib")
class RtmpPuller {
public:
class RtmpPullObserver {
public :
enum ObserverType {
Observer_Video = 0,
Observer_Audio = 1,
};
virtual void OnRtmpFrame(void * dat, uint32_t size) {};
ObserverType mObserverType;
};
enum CAP_STATUS {
RUNNING = 1,
STOP = 2,
PAUSE = 3,
FAIL = 4,
NOSOURCE = 6,
};
RtmpPuller();
int ConnectServer(const char *);
int StartPull();
int PullData();
int SetObserver(RtmpPullObserver *);
CAP_STATUS Status();
AVStream *AudioStream();
private:
CAP_STATUS mStatus;
AVOutputFormat *mOutFormat = NULL;
//Input AVFormatContext and Output AVFormatContext
AVFormatContext *mIfmtCtx = NULL;
AVPacket pkt;
string mRtmpUrl;
int mVideoIndex;
int mAudioIndex;
int mFrameIndex;
AVBitStreamFilterContext* mH264bsfc;
std::thread *mThread;
vector<RtmpPullObserver*> mObserver;
AVStream *mAudioStream;
AVStream *mVideoStream;
mutex mMux;
};
int ThreadPull(RtmpPuller*p);


@ -0,0 +1,71 @@
#pragma once
extern "C" {
#include "librtmp\rtmp.h"
#include "librtmp\rtmp_sys.h"
#include "librtmp\amf.h"
}
#include <windows.h>
#include <string>
#include <thread>
#include <vector>
#include <mutex>
using namespace std;
#ifdef WIN32
#include <windows.h>
#pragma comment(lib,"WS2_32.lib")
#pragma comment(lib,"winmm.lib")
#endif
class RtmpPuller2
{
public:
class RtmpPullObserver {
public:
enum ObserverType {
Observer_Video = 0,
Observer_Audio = 1,
};
virtual void OnRtmpFrame(void * dat, uint32_t size) {};
ObserverType mObserverType;
};
enum CAP_STATUS {
CONNECTED = 0,
RUNNING = 1,
STOP = 2,
PAUSE = 3,
FAIL = 4,
NOSOURCE = 6,
};
RtmpPuller2();
~RtmpPuller2();
int StopPull();
int StartPull();
int PullData();
int SetObserver(RtmpPuller2::RtmpPullObserver *);
CAP_STATUS Status();
int ConnectServer(string url);
private:
std::thread *mThread;
RTMP *mRtmp;
string mUrl;
CAP_STATUS mStatus;
vector<RtmpPuller2::RtmpPullObserver*> mObserver;
mutex mMux;
uint8_t *mAccBuffer;
// cached ADTS header fields: the AAC audio-specific config is only sent in the first packet of the stream
uint8_t ch0 = 0;
uint8_t ch1 = 0;
uint16_t config = 0;
uint16_t object_type = 0;
uint16_t sample_frequency_index = 0;
uint16_t channels = 0;
uint16_t frame_length_flag = 0;
uint16_t depend_on_core_coder = 0;
uint16_t extension_flag = 0;
};


@ -0,0 +1,6 @@
#pragma once
class RtmpPuser {
};


@ -0,0 +1,111 @@
#pragma once
#include "librtmp_send264.h"
#include "librtmp\rtmp.h"
#include "librtmp\rtmp_sys.h"
#include "librtmp\amf.h"
#include "AACAudioCoder.h"
#include "sps_decode.h"
#include "VideoCoder.h"
#include <mutex>
#include <thread>
#include <queue>
#ifdef WIN32
#include <windows.h>
#pragma comment(lib,"WS2_32.lib")
#pragma comment(lib,"winmm.lib")
#endif
#include <string>
using namespace std;
#define RTMP_HEAD_SIZE (sizeof(RTMPPacket)+RTMP_MAX_HEADER_SIZE)
class RtmpPusher
{
protected:
RTMP *m_pRtmp;
string mUrl;
int mTick = 10;
std::mutex mMux;
std::thread *mThread;
bool mIfConnected = false;
std::thread::id mThreadId;
public:
bool IfConnect();
int RTMP264_Connect(const char* url);
void RTMP264_Close();
int SendPacket(unsigned int nPacketType, unsigned char *data, unsigned int size, unsigned int nTimestamp);
int SendVideoPacket(unsigned int nPacketType, unsigned char *data, unsigned int size, unsigned int nTimestamp);
int SetTick(int tick) { this->mTick = tick; return 0; }
virtual int StartPush() { return 0; };
RtmpPusher();
virtual ~RtmpPusher();
};
/**
* _RTMPMetadata
*
*/
typedef struct _RTMPMetadata
{
// video, must be h264 type
int nWidth;
int nHeight;
int nFrameRate;
unsigned int nSpsLen;
unsigned char *Sps;
unsigned int nPpsLen;
unsigned char *Pps;
} RTMPMetadata, *LPRTMPMetadata;
enum Payload_Type {
PAYLOAD_TYPE_VIDEO = 0,
PAYLOAD_TYPE_AUDIO = 1
};
typedef struct _T_Buffer {
uint8_t *buf;
int len;
Payload_Type type;
}Buffer;
class H264RtmpPuser : public RtmpPusher ,
public VideoCodeObserver,
public AAC_CODER::AACAudioCoder::EncodeAudioObserver {
private:
bool mFirtACC;
uint16_t mAudioPts;
bool mIfStart = false;
// video sequence header (SPS/PPS) packet
int SendVideoSpsPps(unsigned char *pps, int pps_len,
unsigned char * sps, int sps_len, unsigned int nTimeStamp);
// audio sequence header (AudioSpecificConfig) packet
int SendAudioSync(int audioType, int sampleIndex, int channel, unsigned int nTimeStamp);
int SendAudioData(unsigned char*dat, unsigned int size, unsigned int nTimeStamp);
int SendH264Packet(unsigned char *data,
unsigned int size, int bIsKeyFrame, unsigned int nTimeStamp);
int sendDataPackH264(unsigned char *data,
unsigned int size, int bIsKeyFrame, unsigned int nTimeStamp);
int sendDataPackAAC(unsigned char *data, unsigned int size, unsigned int nTimeStamp);
public:
queue<Buffer> mPack;
RTMPMetadata metaData;
H264RtmpPuser();
int sortAndSendNal(uint8_t *data, int len);
int SetSpsPps(unsigned char *pps, int pps_len,
unsigned char * sps, int sps_len);
void OnAudioEncode(const void *frameaddress, uint32_t framelen, uint16_t pts);
void OnGetCodeFrame(uint8_t *data, int len);
void ProcessSend();
int StartPush();
int StopPush();
};
int ThreadEncode(H264RtmpPuser*p);
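// Illustrative usage sketch (an assumption; it presumes VideoCoder exposes a SetObserver()
// like the other classes): the H.264 encoder and the AAC encoder report finished frames
// through their observer interfaces, and the pusher forwards them to the RTMP server from
// its worker thread.
//
//   H264RtmpPuser pusher;
//   pusher.RTMP264_Connect("rtmp://example.com/live/stream"); // placeholder URL
//   if (pusher.IfConnect()) {
//       videoCoder.SetObserver(&pusher);   // delivers OnGetCodeFrame()
//       aacCoder.SetObserver(&pusher);     // delivers OnAudioEncode()
//       pusher.StartPush();
//   }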


@ -0,0 +1,328 @@
#include "SdlPlayer.h"
#include "Debuger.h"
uint32_t FfmpegPixFormatToSdl(AVPixelFormat av) {
switch (av) {
case AV_PIX_FMT_YUYV422:
return SDL_PIXELFORMAT_YUY2;
default:
return SDL_PIXELFORMAT_UNKNOWN;
}
}
SDLPlayser::SDLPlayser(HWND hwnd,int width,int height, AVPixelFormat fmt):
mScreen(nullptr),
mRender(nullptr)
{
if (nullptr == hwnd) {
}
// SDL part
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
printf("Could not initialize SDL - %s\n", SDL_GetError());
}
this->mInWidth = width;
this->mInHeight = height;
screen_w = 640;
screen_h = 480;
//SDL 2.0 Support for multiple windows
/*
screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
screen_w, screen_h,
SDL_WINDOW_OPENGL);*/
if (nullptr == mScreen) {
mScreen = SDL_CreateWindowFrom((void *)hwnd);
if (nullptr == mScreen) {
printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
}
}
mRender = SDL_CreateRenderer(mScreen, -1, 0);
SDL_GetWindowSize(mScreen, &screen_w, &screen_h);
Debuger::Debug(L"pix width %d height %d\r\n",screen_w,screen_h);
//IYUV: Y + U + V (3 planes)
//YV12: Y + V + U (3 planes)
mTexture = SDL_CreateTexture(mRender, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
screen_w, screen_h);
this->mFormat = fmt;
}
SDLPlayser::~SDLPlayser()
{
SDL_DestroyWindow(mScreen);
SDL_DestroyRenderer(mRender);
}
// force-convert frames in other pixel formats into YUV420P that the SDL texture can consume
int forceYUV420PBuffer(uint8_t * src, int size,int inWidth,int inHeight,
AVPixelFormat format, uint8_t **dst[4], int *len,int mWidth,int mHeight)
{
uint8_t *src_data[4];
int src_linesize[4];
uint8_t *dst_data[4];
int dst_linesize[4];
struct SwsContext *img_convert_ctx;
int ret = 0;
if (nullptr == dst || nullptr == len) {
return -2;
}
int src_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(format));
AVPixelFormat dst_pixfmt = AV_PIX_FMT_YUV420P;
int dst_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pixfmt));
ret = av_image_alloc(src_data, src_linesize, inWidth, inHeight, format, 1);
if (ret< 0) {
Debuger::Debug(L"Could not allocate source image\n");
return -1;
}
ret = av_image_alloc(dst_data, dst_linesize, mWidth, mHeight, AV_PIX_FMT_YUV420P, 1);
if (ret< 0) {
Debuger::Debug(L"Could not allocate destination image\n");
return -1;
}
img_convert_ctx = sws_alloc_context();
//Show AVOption
//av_opt_show2(img_convert_ctx, stdout, AV_OPT_FLAG_VIDEO_PARAM, 0);
//Set Value
av_opt_set_int(img_convert_ctx, "sws_flags", SWS_BICUBIC | SWS_PRINT_INFO, 0);
av_opt_set_int(img_convert_ctx, "srcw", inWidth, 0);
av_opt_set_int(img_convert_ctx, "srch", inHeight, 0);
av_opt_set_int(img_convert_ctx, "src_format", format, 0);
//'0' for MPEG (Y:0-235);'1' for JPEG (Y:0-255)
av_opt_set_int(img_convert_ctx, "src_range", 1, 0);
av_opt_set_int(img_convert_ctx, "dstw", mWidth, 0);
av_opt_set_int(img_convert_ctx, "dsth", mHeight, 0);
av_opt_set_int(img_convert_ctx, "dst_format", dst_pixfmt, 0);
av_opt_set_int(img_convert_ctx, "dst_range", 1, 0);
sws_init_context(img_convert_ctx, NULL, NULL);
// copy the source buffer into the input planes
switch (format) {
case AV_PIX_FMT_GRAY8: {
memcpy(src_data[0], src, inWidth*inHeight);
break;
}
case AV_PIX_FMT_YUV420P: {
memcpy(src_data[0], src, inWidth*inHeight); //Y
memcpy(src_data[1], src + inWidth*inHeight, inWidth*inHeight / 4); //U
memcpy(src_data[2], src + inWidth*inHeight * 5 / 4, inWidth*inHeight / 4); //V
break;
}
case AV_PIX_FMT_YUV422P: {
memcpy(src_data[0], src, inWidth*inHeight); //Y
memcpy(src_data[1], src + inWidth*inHeight, inWidth*inHeight / 2); //U
memcpy(src_data[2], src + inWidth*inHeight * 3 / 2, inWidth*inHeight / 2); //V
break;
}
case AV_PIX_FMT_YUV444P: {
memcpy(src_data[0], src, inWidth*inHeight); //Y
memcpy(src_data[1], src + inWidth*inHeight, inWidth*inHeight); //U
memcpy(src_data[2], src + inWidth*inHeight * 2, inWidth*inHeight); //V
break;
}
case AV_PIX_FMT_YUYV422: {
memcpy(src_data[0], src, inWidth*inHeight * 2); //Packed
break;
}
case AV_PIX_FMT_RGB24: {
memcpy(src_data[0], src, inWidth*inHeight * 3); //Packed
break;
}
case AV_PIX_FMT_RGB32: {
memcpy(src_data[0], src, inWidth*inHeight * 4); //Packed
break;
}
default: {
Debuger::Debug(L"Not Support Input Pixel Format.\n");
break;
}
}
if (FALSE)
{
src_linesize[0] = -inHeight;
src_data[0] += inWidth*(src_linesize[0] - 1);
// convert the data
ret = sws_scale(img_convert_ctx, src_data, src_linesize,
0, inHeight, dst_data, dst_linesize);
if (ret < 0) {
return ret;
}
}
else{
ret = sws_scale(img_convert_ctx, src_data, src_linesize,
0, inHeight, dst_data, dst_linesize);
if (ret < 0) {
return ret;
}
}
memcpy(dst[0], dst_data[0], mWidth*mHeight);
memcpy(dst[1], dst_data[1], mWidth*mHeight / 4);
memcpy(dst[2], dst_data[2], mWidth*mHeight / 4);
*len = mWidth*mHeight + mWidth*mHeight / 2;
// the source planes are no longer needed here; dst must be freed by the caller
av_freep(&src_data[0]);
av_freep(&dst_data[0]);
sws_freeContext(img_convert_ctx);
return 0;
}
int SDLPlayser::RenderYuv(void * pBuf, uint32_t size, AVPixelFormat pix)
{
uint8_t *pFrame[4];
int iFramesize;
int lineSize[4];
static FILE *pFile = nullptr;
if (nullptr == pFile) {
pFile = fopen("test.yuv", "wb");
}
int ret = av_image_alloc(pFrame, lineSize, 640, 480, AV_PIX_FMT_YUV420P, 1);
if (ret< 0) {
Debuger::Debug(L"Could not allocate destination image\n");
}
forceYUV420PBuffer((uint8_t *)pBuf, size,this->mInWidth,this->mInHeight, this->mFormat
,(uint8_t ***)&pFrame, &iFramesize, 640, 480);
SDL_UpdateYUVTexture(mTexture, &sdlRect,
pFrame[0], lineSize[0],
pFrame[1], lineSize[1],
pFrame[2], lineSize[2]);
//SDL_UpdateTexture(sdlTexture, NULL, picture->data[0], 1280);
/*
SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
picture->data[0], picture->linesize[0],
picture->data[1], picture->linesize[1],
picture->data[2], picture->linesize[2]);
*/
SDL_RenderClear(mRender);
sdlRect.x = 0;
sdlRect.y = 0;
sdlRect.w = screen_w;
sdlRect.h = screen_h;
//fwrite(pFrame[0], lineSize[0]*480,1 ,pFile);
//fwrite(pFrame[1], lineSize[1] * 480 ,1, pFile);
//fwrite(pFrame[2], lineSize[2] * 480 ,1, pFile);
fflush(pFile);
SDL_RenderCopy(mRender, mTexture, nullptr, &sdlRect);
SDL_RenderPresent(mRender);
av_freep(&pFrame[0]); // av_freep also nulls the pointer, so no extra free() is needed
//Debuger::Debug(L"local screen size is %d %d\r\n", screen_w, screen_h);
return 0;
}
int forceYUV420P(AVFrame * picture,uint8_t **dst[4], int *len,int width,int height)
{
uint8_t *dst_data[4];
int dst_linesize[4];
struct SwsContext *img_convert_ctx;
int ret = 0;
if (nullptr == dst || nullptr == len) {
return -2;
}
AVPixelFormat dst_pixfmt = AV_PIX_FMT_YUV420P;
int dst_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pixfmt));
ret = av_image_alloc(dst_data, dst_linesize, width, height, AV_PIX_FMT_YUV420P, 1);
if (ret< 0) {
Debuger::Debug(L"Could not allocate destination image\n");
return -1;
}
img_convert_ctx = sws_alloc_context();
//Show AVOption
//av_opt_show2(img_convert_ctx, stdout, AV_OPT_FLAG_VIDEO_PARAM, 0);
//Set Value
av_opt_set_int(img_convert_ctx, "sws_flags", SWS_BICUBIC | SWS_PRINT_INFO, 0);
av_opt_set_int(img_convert_ctx, "srcw", picture->width, 0);
av_opt_set_int(img_convert_ctx, "srch", picture->height, 0);
av_opt_set_int(img_convert_ctx, "src_format", dst_pixfmt, 0);
//'0' for MPEG (Y:0-235);'1' for JPEG (Y:0-255)
av_opt_set_int(img_convert_ctx, "src_range", 1, 0);
av_opt_set_int(img_convert_ctx, "dstw", width, 0);
av_opt_set_int(img_convert_ctx, "dsth", height, 0);
av_opt_set_int(img_convert_ctx, "dst_format", dst_pixfmt, 0);
av_opt_set_int(img_convert_ctx, "dst_range", 1, 0);
sws_init_context(img_convert_ctx, NULL, NULL);
// convert the data
ret = sws_scale(img_convert_ctx, picture->data, picture->linesize, 0, picture->height,
dst_data, dst_linesize);
if (ret < 0) {
return ret;
}
memcpy(dst[0], dst_data[0], width*height);
memcpy(dst[1], dst_data[1], width*height / 4);
memcpy(dst[2], dst_data[2], width*height / 4);
// the source frame is no longer needed here; dst must be freed by the caller
av_freep(&dst_data[0]);
sws_freeContext(img_convert_ctx);
return 0;
}
// assumes the input frame is always YUV420P
int SDLPlayser::OnRecieveData(AVFrame * picture)
{
uint8_t *pFrame[4];
int iFramesize;
int lineSize[4];
int ret = av_image_alloc(pFrame, lineSize, 640, 480, AV_PIX_FMT_YUV420P, 1);
if (ret< 0) {
Debuger::Debug(L"Could not allocate destination image\n");
}
forceYUV420P(picture, (uint8_t ***)&pFrame, &iFramesize, 640, 480);
uint32_t pitchY = picture->linesize[0];
uint32_t pitchU = picture->linesize[1];
uint32_t pitchV = picture->linesize[2];
uint8_t *avY = picture->data[0];
uint8_t *avU = picture->data[1];
uint8_t *avV = picture->data[2];
SDL_UpdateYUVTexture(mTexture, &sdlRect,
pFrame[0], lineSize[0],
pFrame[1], lineSize[1],
pFrame[2], lineSize[2]);
//SDL_UpdateTexture(sdlTexture, NULL, picture->data[0], 1280);
/*
SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
picture->data[0], picture->linesize[0],
picture->data[1], picture->linesize[1],
picture->data[2], picture->linesize[2]);
*/
SDL_RenderClear(mRender);
sdlRect.x = 0;
sdlRect.y = 0;
sdlRect.w = screen_w;
sdlRect.h = screen_h;
SDL_RenderCopy(mRender, mTexture, nullptr, &sdlRect);
SDL_RenderPresent(mRender);
av_freep(&pFrame[0]);
//Debuger::Debug(L"remote screen size is %d %d\r\n", screen_w, screen_h);
return 0;
}
int SDLPlayser::OnBuffer(double dblSampleTime, BYTE * pBuffer, long lBufferSize)
{
this->RenderYuv(pBuffer, lBufferSize, this->mFormat);
return 0;
}
int SDLPlayser::OnCameraData(uint8_t * dat, uint32_t size)
{
this->RenderYuv(dat, size, AV_PIX_FMT_YUYV422);
return 0;
}

View File

@@ -0,0 +1,34 @@
#pragma once
#include <Windows.h>
#include "H264Docoder.h"
#include "CameraCapture.h"
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "sdl/SDL.h"
};
class SDLPlayser : public H264decoder::H264DecodeObserver , public Camera::CameraObserver{
public:
SDLPlayser(HWND,int ,int, AVPixelFormat);
~SDLPlayser();
int RenderYuv(void *pBuf,uint32_t size, AVPixelFormat pix);
int OnRecieveData(AVFrame *frame);
int OnBuffer(double dblSampleTime, BYTE * pBuffer, long lBufferSize) ;
int OnCameraData(uint8_t *dat, uint32_t size) ;
private:
HWND mWindowWnd;
//SDL---------------------------
int screen_w = 0, screen_h = 0;
int mInWidth, mInHeight;
SDL_Texture* mTexture;
SDL_Rect sdlRect;
AVPixelFormat mFormat;
SDL_Window *mScreen;
SDL_Renderer *mRender;
};

View File

@@ -0,0 +1,278 @@

#include "VideoCoder.h"
#include "Debuger.h"
FILE *p = nullptr;
int VideoCoder::OnBuffer(double dblSampleTime, BYTE * pBuffer, long lBufferSize)
{
this->Encode(pBuffer, lBufferSize, AV_PIX_FMT_YUV420P);
return 0;
}
int VideoCoder::OnCameraData(uint8_t * dat, uint32_t size)
{
this->Encode(dat, size, AV_PIX_FMT_YUV420P);
return 0;
}
// use unsigned int here: uint8_t parameters would truncate values such as 640/480
int VideoCoder::SetDestPix(unsigned int width, unsigned int height)
{
this->mDestHeight = height;
this->mDestWidth = width;
return 0;
}
VideoCoder::VideoCoder(int width, int height, AVPixelFormat formt)
:mObserver(nullptr),mFrame(nullptr), mPitureBuffer(nullptr), mFormatCtx(nullptr), mOutputFmt(nullptr),
mVideoStream(nullptr), mCodecCtx(nullptr), mCodec(nullptr)
{
AVCodecID codec_id = AV_CODEC_ID_H264;
// register all codecs before looking up the encoder
av_register_all();
mCodec = avcodec_find_encoder(codec_id);
if (nullptr == p) {
p = fopen("shit.h264", "wb");
}
this->mWidth = width;
this->mHeight = height;
this->mInformat = formt;
if (!mCodec) {
printf("Codec not found\n");
}
this->mFormatCtx = avformat_alloc_context();
//Original article: https://blog.csdn.net/leixiaohua1020/article/details/25430425 (adapted from Lei Xiaohua's post)
this->mOutputFmt = av_guess_format(NULL, "shit.h264", NULL);
this->mFormatCtx->oformat = mOutputFmt;
mCodecCtx = avcodec_alloc_context3(mCodec);
if (!mCodecCtx) {
printf("Could not allocate video codec context\n");
}
mCodecCtx->bit_rate = 1000;
this->mDestHeight = 480;
this->mDestWidth = 640;
mCodecCtx->width = this->mDestWidth;
mCodecCtx->height = this->mDestHeight;
mCodecCtx->time_base.num = 1;
mCodecCtx->time_base.den = 10;
mCodecCtx->max_b_frames = 0;
mCodecCtx->qmin = 10;
mCodecCtx->qmax = 25;
//mCodecCtx->flags |= AV_CODEC_FLAG_LOW_DELAY;
mCodecCtx->gop_size = 10;
mCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
av_opt_set(mCodecCtx->priv_data, "preset", "superfast", 0);
av_opt_set(mCodecCtx->priv_data, "tune", "zerolatency", 0);
if (avcodec_open2(mCodecCtx, mCodec, NULL) < 0) {
printf("Could not open codec\n");
}
mFrame = av_frame_alloc();
if (!mFrame) {
printf("Could not allocate video frame\n");
}
mFrame->format = mCodecCtx->pix_fmt;
// frame dimensions must match the encoder context
mFrame->width = mCodecCtx->width;
mFrame->height = mCodecCtx->height;
mFrame->pts = 0;
int ret = av_image_alloc(mFrame->data, mFrame->linesize, mCodecCtx->width, mCodecCtx->height,
mCodecCtx->pix_fmt, 8);
if (ret < 0) {
printf("Could not allocate raw picture buffer\n");
}
// Assume the resolution never changes, so the AVPacket can be reused
avformat_write_header(mFormatCtx, NULL);
int picture_size = avpicture_get_size(AV_PIX_FMT_YUV420P, mCodecCtx->width, mCodecCtx->height);
}
VideoCoder::~VideoCoder()
{
fclose(p);
}
void VideoCoder::Encode(uint8_t * src, int size, enum AVPixelFormat format)
{
uint8_t *pFrame[4];
int lineSize[4];
static int debugs = 1;
//If the input is not YUV420P, convert it to YUV420P first
int iFramesize;
av_init_packet(&mAVPack);
mAVPack.data = NULL; // packet data will be allocated by the encoder
mAVPack.size = 0;
int ret = av_image_alloc(pFrame, lineSize, mWidth, mHeight, AV_PIX_FMT_YUV420P, 1);
if (ret< 0) {
Debuger::Debug(L"Could not allocate destination image\n");
}
if (this->mInformat != AV_PIX_FMT_YUV420P || (this->mDestHeight != mHeight)) {
int size = avpicture_get_size(this->mInformat,mWidth,mHeight);
this->forceYUV420P(src, size, mInformat, (uint8_t ***)&pFrame,&iFramesize);
//Only YUV420P is supported here
mFrame->data[0] = pFrame[0]; //Y
mFrame->data[1] = pFrame[1]; //U
mFrame->data[2] = pFrame[2]; //V
}
else {
mFrame->data[0] = src; //Y
mFrame->data[1] = src + mWidth*mHeight; //U
mFrame->data[2] = src + mWidth*mHeight + mWidth*mHeight/4; //V
}
//PTS
mFrame->pts++;
int got_picture = 0;
//Encode
avcodec_encode_video2(mCodecCtx, &mAVPack, mFrame, &got_picture);
if (got_picture > 0) {
if(nullptr != this->mObserver)
this->mObserver->OnGetCodeFrame(mAVPack.data, mAVPack.size);
}
//Debuger::Debug(L"Succeed to encode frame: %5d\tsize:%5d\n", 1, mAVPack.size);
//fwrite(mAVPack.data, 1, mAVPack.size, p);
//fflush(p);
// Release the packet so that packets do not pile up in the encoder
av_packet_unref(&mAVPack);
av_freep(&pFrame[0]);
//av_freep(&mFrame->data[0]);
//av_freep(&mFrame->data[0]);
}
void VideoCoder::SetOutPutPixel(unsigned int width, unsigned int height)
{
this->mHeight = height;
this->mWidth = width;
}
int VideoCoder::flushCoder(AVFormatContext *fmt_ctx, unsigned int stream_index)
{
int ret;
int got_frame;
AVPacket enc_pkt;
// nothing to flush unless the encoder buffers frames (AV_CODEC_CAP_DELAY)
if (!(this->mFormatCtx->streams[stream_index]->codec->codec->capabilities & AV_CODEC_CAP_DELAY))
return 0;
while (1) {
enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = avcodec_encode_video2(fmt_ctx->streams[stream_index]->codec, &enc_pkt,
NULL, &got_frame);
av_frame_free(NULL);
if (ret < 0)
break;
if (!got_frame) {
ret = 0;
break;
}
Debuger::Debug(L"Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);
/* mux encoded frame */
ret = av_write_frame(fmt_ctx, &enc_pkt);
if (ret < 0)
break;
}
return ret;
}
// Force-convert data in other pixel formats into a layout libav can work with (YUV420P)
int VideoCoder::forceYUV420P(uint8_t * src, int size,
AVPixelFormat format,uint8_t **dst[4],int *len)
{
uint8_t *src_data[4];
int src_linesize[4];
uint8_t *dst_data[4];
int dst_linesize[4];
struct SwsContext *img_convert_ctx;
int ret = 0;
if (nullptr == dst || nullptr == len) {
return -2;
}
int src_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(format));
AVPixelFormat dst_pixfmt = AV_PIX_FMT_YUV420P;
int dst_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pixfmt));
ret = av_image_alloc(src_data, src_linesize, mWidth, mHeight, format, 1);
if (ret< 0) {
Debuger::Debug(L"Could not allocate source image\n");
return -1;
}
ret = av_image_alloc(dst_data, dst_linesize, mDestWidth, mDestHeight, AV_PIX_FMT_YUV420P, 1);
if (ret< 0) {
Debuger::Debug(L"Could not allocate destination image\n");
return -1;
}
img_convert_ctx = sws_alloc_context();
//Show AVOption
//av_opt_show2(img_convert_ctx, stdout, AV_OPT_FLAG_VIDEO_PARAM, 0);
//Set Value
av_opt_set_int(img_convert_ctx, "sws_flags", SWS_BICUBIC | SWS_PRINT_INFO, 0);
av_opt_set_int(img_convert_ctx, "srcw", mWidth, 0);
av_opt_set_int(img_convert_ctx, "srch", mHeight, 0);
av_opt_set_int(img_convert_ctx, "src_format", format, 0);
av_opt_set_int(img_convert_ctx, "src_range", 1, 0);
av_opt_set_int(img_convert_ctx, "dstw", mDestWidth, 0);
av_opt_set_int(img_convert_ctx, "dsth", mDestHeight, 0);
av_opt_set_int(img_convert_ctx, "dst_format", dst_pixfmt, 0);
av_opt_set_int(img_convert_ctx, "dst_range", 1, 0);
sws_init_context(img_convert_ctx, NULL, NULL);
// Fill the source planes
switch (format) {
case AV_PIX_FMT_GRAY8: {
memcpy(src_data[0], src, mWidth*mHeight);
break;
}
case AV_PIX_FMT_YUV420P: {
memcpy(src_data[0], src, mWidth*mHeight); //Y
memcpy(src_data[1], src + mWidth*mHeight, mWidth*mHeight / 4); //U
memcpy(src_data[2], src + mWidth*mHeight * 5 / 4, mWidth*mHeight / 4); //V
break;
}
case AV_PIX_FMT_YUV422P: {
memcpy(src_data[0], src, mWidth*mHeight); //Y
memcpy(src_data[1], src + mWidth*mHeight, mWidth*mHeight / 2); //U
memcpy(src_data[2], src + mWidth*mHeight * 3 / 2, mWidth*mHeight / 2); //V
break;
}
case AV_PIX_FMT_YUV444P: {
memcpy(src_data[0], src, mWidth*mHeight); //Y
memcpy(src_data[1], src + mWidth*mHeight, mWidth*mHeight); //U
memcpy(src_data[2], src + mWidth*mHeight * 2, mWidth*mHeight); //V
break;
}
case AV_PIX_FMT_YUYV422: {
memcpy(src_data[0], src, mWidth*mHeight * 2); //Packed
break;
}
case AV_PIX_FMT_RGB24: {
memcpy(src_data[0], src, mWidth*mHeight * 3); //Packed
break;
}
case AV_PIX_FMT_RGB32: {
memcpy(src_data[0], src, mWidth*mHeight *4); //Packed
break;
}
default: {
Debuger::Debug(L"Not Support Input Pixel Format.\n");
break;
}
}
// Convert the pixel data
ret = sws_scale(img_convert_ctx, src_data, src_linesize, 0, mHeight, dst_data, dst_linesize);
if (ret < 0) {
return ret;
}
memcpy(dst[0], dst_data[0], mDestWidth*mDestHeight);
memcpy(dst[1], dst_data[1], mDestWidth*mDestHeight /4);
memcpy(dst[2], dst_data[2], mDestWidth*mDestHeight /4);
*len = mDestWidth*mDestHeight + mDestWidth*mDestHeight / 2;
// The source buffer is no longer needed here, but dst must be freed by the caller
av_freep(&src_data[0]);
av_freep(&dst_data[0]);
sws_freeContext(img_convert_ctx);
return 0;
}

View File

@@ -0,0 +1,70 @@
#pragma once
#ifdef _WIN32
#include "Debuger.h"
#include "CameraCapture.h"
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#ifdef __cplusplus
};
#endif
#endif
class VideoCodeObserver {
public:
virtual void OnGetCodeFrame(uint8_t *data, int len) {
Debuger::Debug(L"get one code %d \r\n", len);
}
};
class VideoCoder : public Camera::CameraObserver{
private:
int mWidth;
int mHeight;
unsigned int mDestWidth;
unsigned int mDestHeight;
int mBytePerPixel;
enum AVPixelFormat mInformat;
AVFormatContext *mFormatCtx;
AVOutputFormat *mOutputFmt;
AVStream *mVideoStream;
AVCodecContext *mCodecCtx;
AVCodec *mCodec;
AVPacket mAVPack;
uint8_t *mPitureBuffer;
AVFrame *mFrame;
VideoCodeObserver *mObserver;
public:
int OnBuffer(double dblSampleTime, BYTE * pBuffer, long lBufferSize);
int OnCameraData(uint8_t *dat, uint32_t size) ;
int SetDestPix(unsigned int width, unsigned int height);
VideoCoder(int width,int height,AVPixelFormat formt);
~VideoCoder();
void Encode(uint8_t*src,int size, enum AVPixelFormat format);
void SetOberver(VideoCodeObserver *p) {
this->mObserver = p;
}
void SetOutPutPixel(unsigned int width,unsigned int height);
private:
int flushCoder(AVFormatContext *fmt_ctx, unsigned int stream_index);
int forceYUV420P(uint8_t *src, int size, enum AVPixelFormat format, uint8_t ***dst,int *s);
};

View File

@@ -0,0 +1,41 @@
/**
* Simplest Librtmp Send 264
*
* Lei Xiaohua, Zhang Hui
* leixiaohua1020@126.com
* zhanghuicuc@gmail.com
* Communication University of China / Digital TV Technology
* http://blog.csdn.net/leixiaohua1020
*
* This module sends H.264 data from memory to an RTMP streaming server.
*
*/
/**
* Initialize the stream and connect to the RTMP server.
*
* @param url  RTMP url of the target webapp on the server
*
* @return 1 on success, 0 on failure
*/
int RTMP264_Connect(const char* url);
/**
* Send H.264 data over RTMP.
*
* @param read_buffer  callback used to pull H.264 data; it takes 2 parameters:
*                     uint8_t *buf: buffer to be filled with data
*                     int buf_size: size of the buffer
*
* @return 1 on success, 0 on failure
*/
int RTMP264_Send(int (*read_buffer)(unsigned char *buf, int buf_size));
/**
* Disconnect from the server and release resources.
*/
void RTMP264_Close();

View File

@@ -0,0 +1,247 @@
/**
* Simplest Librtmp Send 264
*
* Lei Xiaohua, Zhang Hui
* leixiaohua1020@126.com
* zhanghuicuc@gmail.com
* Communication University of China / Digital TV Technology
* http://blog.csdn.net/leixiaohua1020
*
* This module sends H.264 data from memory to an RTMP streaming server.
*
*/
#include "sps_decode.h"
typedef unsigned int UINT;
typedef unsigned char BYTE;
typedef unsigned long DWORD;
UINT Ue(BYTE *pBuff, UINT nLen, UINT &nStartBit)
{
//Count the number of leading zero bits
UINT nZeroNum = 0;
while (nStartBit < nLen * 8)
{
if (pBuff[nStartBit / 8] & (0x80 >> (nStartBit % 8))) //&: bitwise AND, %: modulo
{
break;
}
nZeroNum++;
nStartBit++;
}
nStartBit ++;
//Compute the result
DWORD dwRet = 0;
for (UINT i=0; i<nZeroNum; i++)
{
dwRet <<= 1;
if (pBuff[nStartBit / 8] & (0x80 >> (nStartBit % 8)))
{
dwRet += 1;
}
nStartBit++;
}
return (1 << nZeroNum) - 1 + dwRet;
}
int Se(BYTE *pBuff, UINT nLen, UINT &nStartBit)
{
int UeVal=Ue(pBuff,nLen,nStartBit);
double k=UeVal;
int nValue=ceil(k/2);
//ceil() returns the smallest integer not less than its argument: ceil(2)=ceil(1.2)=ceil(1.5)=2.00
if (UeVal % 2==0)
nValue=-nValue;
return nValue;
}
DWORD u(UINT BitCount,BYTE * buf,UINT &nStartBit)
{
DWORD dwRet = 0;
for (UINT i=0; i<BitCount; i++)
{
dwRet <<= 1;
if (buf[nStartBit / 8] & (0x80 >> (nStartBit % 8)))
{
dwRet += 1;
}
nStartBit++;
}
return dwRet;
}
/**
* Remove the emulation prevention bytes (0x03) from an H264 NAL unit.
*
* @param buf      SPS data, modified in place
* @param buf_size SPS data length, updated after the 0x03 bytes are removed
*
* @return none
*/
void de_emulation_prevention(BYTE* buf,unsigned int* buf_size)
{
int i=0,j=0;
BYTE* tmp_ptr = nullptr;
unsigned int tmp_buf_size=0;
int val=0;
tmp_ptr=buf;
tmp_buf_size=*buf_size;
for(i=0;i<(tmp_buf_size-2);i++)
{
//check for 0x000003
val=(tmp_ptr[i]^0x00) +(tmp_ptr[i+1]^0x00)+(tmp_ptr[i+2]^0x03);
if(val==0)
{
//kick out 0x03
for(j=i+2;j<tmp_buf_size-1;j++)
tmp_ptr[j]=tmp_ptr[j+1];
//and so we should decrease bufsize
(*buf_size)--;
}
}
return;
}
/**
* Decode an SPS and extract the video width, height and frame rate.
*
* @param buf    SPS data
* @param nLen   length of the SPS data
* @param width  decoded picture width
* @param height decoded picture height
* @param fps    frame rate (0 if not signalled)
* @return 1 on success, 0 on failure
*/
int h264_decode_sps(BYTE * buf,unsigned int nLen,int &width,int &height,int &fps)
{
UINT StartBit=0;
fps=0;
de_emulation_prevention(buf,&nLen);
int forbidden_zero_bit = u(1,buf,StartBit);
int nal_ref_idc = u(2,buf,StartBit);
int nal_unit_type = u(5,buf,StartBit);
if(nal_unit_type==7)
{
int profile_idc=u(8,buf,StartBit);
int constraint_set0_flag=u(1,buf,StartBit);//(buf[1] & 0x80)>>7;
int constraint_set1_flag=u(1,buf,StartBit);//(buf[1] & 0x40)>>6;
int constraint_set2_flag=u(1,buf,StartBit);//(buf[1] & 0x20)>>5;
int constraint_set3_flag=u(1,buf,StartBit);//(buf[1] & 0x10)>>4;
int reserved_zero_4bits=u(4,buf,StartBit);
int level_idc=u(8,buf,StartBit);
int seq_parameter_set_id=Ue(buf,nLen,StartBit);
if( profile_idc == 100 || profile_idc == 110 ||
profile_idc == 122 || profile_idc == 144 )
{
int chroma_format_idc=Ue(buf,nLen,StartBit);
if( chroma_format_idc == 3 )
int residual_colour_transform_flag=u(1,buf,StartBit);
int bit_depth_luma_minus8=Ue(buf,nLen,StartBit);
int bit_depth_chroma_minus8=Ue(buf,nLen,StartBit);
int qpprime_y_zero_transform_bypass_flag=u(1,buf,StartBit);
int seq_scaling_matrix_present_flag=u(1,buf,StartBit);
int seq_scaling_list_present_flag[8];
if( seq_scaling_matrix_present_flag )
{
for( int i = 0; i < 8; i++ ) {
seq_scaling_list_present_flag[i]=u(1,buf,StartBit);
}
}
}
int log2_max_frame_num_minus4=Ue(buf,nLen,StartBit);
int pic_order_cnt_type=Ue(buf,nLen,StartBit);
if( pic_order_cnt_type == 0 )
int log2_max_pic_order_cnt_lsb_minus4=Ue(buf,nLen,StartBit);
else if( pic_order_cnt_type == 1 )
{
int delta_pic_order_always_zero_flag=u(1,buf,StartBit);
int offset_for_non_ref_pic=Se(buf,nLen,StartBit);
int offset_for_top_to_bottom_field=Se(buf,nLen,StartBit);
int num_ref_frames_in_pic_order_cnt_cycle=Ue(buf,nLen,StartBit);
int *offset_for_ref_frame=new int[num_ref_frames_in_pic_order_cnt_cycle];
for( int i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++ )
offset_for_ref_frame[i]=Se(buf,nLen,StartBit);
delete [] offset_for_ref_frame;
}
int num_ref_frames=Ue(buf,nLen,StartBit);
int gaps_in_frame_num_value_allowed_flag=u(1,buf,StartBit);
int pic_width_in_mbs_minus1=Ue(buf,nLen,StartBit);
int pic_height_in_map_units_minus1=Ue(buf,nLen,StartBit);
width=(pic_width_in_mbs_minus1+1)*16;
height=(pic_height_in_map_units_minus1+1)*16;
int frame_mbs_only_flag=u(1,buf,StartBit);
if(!frame_mbs_only_flag)
int mb_adaptive_frame_field_flag=u(1,buf,StartBit);
int direct_8x8_inference_flag=u(1,buf,StartBit);
int frame_cropping_flag=u(1,buf,StartBit);
if(frame_cropping_flag)
{
int frame_crop_left_offset=Ue(buf,nLen,StartBit);
int frame_crop_right_offset=Ue(buf,nLen,StartBit);
int frame_crop_top_offset=Ue(buf,nLen,StartBit);
int frame_crop_bottom_offset=Ue(buf,nLen,StartBit);
}
int vui_parameter_present_flag=u(1,buf,StartBit);
if(vui_parameter_present_flag)
{
int aspect_ratio_info_present_flag=u(1,buf,StartBit);
if(aspect_ratio_info_present_flag)
{
int aspect_ratio_idc=u(8,buf,StartBit);
if(aspect_ratio_idc==255)
{
int sar_width=u(16,buf,StartBit);
int sar_height=u(16,buf,StartBit);
}
}
int overscan_info_present_flag=u(1,buf,StartBit);
if(overscan_info_present_flag)
int overscan_appropriate_flagu=u(1,buf,StartBit);
int video_signal_type_present_flag=u(1,buf,StartBit);
if(video_signal_type_present_flag)
{
int video_format=u(3,buf,StartBit);
int video_full_range_flag=u(1,buf,StartBit);
int colour_description_present_flag=u(1,buf,StartBit);
if(colour_description_present_flag)
{
int colour_primaries=u(8,buf,StartBit);
int transfer_characteristics=u(8,buf,StartBit);
int matrix_coefficients=u(8,buf,StartBit);
}
}
int chroma_loc_info_present_flag=u(1,buf,StartBit);
if(chroma_loc_info_present_flag)
{
int chroma_sample_loc_type_top_field=Ue(buf,nLen,StartBit);
int chroma_sample_loc_type_bottom_field=Ue(buf,nLen,StartBit);
}
int timing_info_present_flag=u(1,buf,StartBit);
if(timing_info_present_flag)
{
int num_units_in_tick=u(32,buf,StartBit);
int time_scale=u(32,buf,StartBit);
fps=time_scale/(2*num_units_in_tick);
}
}
return true;
}
else
return false;
}

View File

@@ -0,0 +1,34 @@
#ifndef __SPS_DECODE__
#define __SPS_DECODE__
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <windows.h>
UINT Ue(BYTE *pBuff, UINT nLen, UINT &nStartBit);
int Se(BYTE *pBuff, UINT nLen, UINT &nStartBit);
DWORD u(UINT BitCount, BYTE * buf, UINT &nStartBit);
/**
* Remove the emulation prevention bytes (0x03) from an H264 NAL unit.
*
* @param buf      SPS data, modified in place
* @param buf_size SPS data length, updated after the 0x03 bytes are removed
*
* @return none
*/
void de_emulation_prevention(BYTE* buf, unsigned int* buf_size);
/**
* Decode an SPS and extract the video width, height and frame rate.
*
* @param buf    SPS data
* @param nLen   length of the SPS data
* @param width  decoded picture width
* @param height decoded picture height
* @param fps    frame rate (0 if not signalled)
* @return 1 on success, 0 on failure
*/
int h264_decode_sps(BYTE * buf, unsigned int nLen, int &width, int &height, int &fps);
#endif

View File

@@ -0,0 +1,106 @@
/*
falab - free algorithm lab
Copyright (C) 2012 luolongzhi (Chengdu, China)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
filename: fa_aacapi.h
version : v1.0.0
time : 2012/11/24 17:58
author : luolongzhi ( falab2012@gmail.com luolongzhi@gmail.com )
code URL: http://code.google.com/p/falab/
*/
#ifndef _FA_AACAPI_H
#define _FA_AACAPI_H
#include "fa_inttypes.h"
#ifdef __cplusplus
extern "C"
{
#endif
//typedef unsigned uintptr_t;
#define FA_AACENC_MPEG_VER_DEF 1
#define FA_AACENC_OBJ_TYPE_DEF 2
/*
For your attention:
1. Sample rates of 32, 44.1 and 48 kHz are supported (I think this is enough: higher adds nothing and low sample rates sound terrible).
If you want to support more sample rates, complete fa_swbtab and add code in fa_aacenc_init yourself; it is very easy.
2. VBR with quality control (0.1 ~ 1.0); VBR mode is recommended because it adjusts the bitrate to each frame's features.
CBR is only rough CBR, because an exact bit rate would need extra bitrate-control loops and would degrade speed.
CBR supports 16~160 kbps per channel, so stereo audio can use 32~320 kbps.
The default settings are: -q 0.7, 17 kHz band width, roughly 110 ~ 150 kbps depending on the audio content.
If you want the best quality and the widest band width, use -q 1: the band width extends to 22 kHz and the whole
frequency range is encoded using the psychoacoustic model.
You can download the EBU test audio files from the web site below for sound quality tests:
http://tech.ebu.ch/public-cations/sqamcd
3. Up to 64 channels are supported, including LFE (LFE needs testing on your side; I did not test it much).
4. Only mpeg_version 2 (MPEG-2 AAC) is supported for now; SBR and PS are not supported yet and are in development.
MPEG2 1
MPEG4 0
Only the ADTS format is supported.
5. aac_objtype only supports MAIN and LC; LTP of MAIN is not supported (barely useful, very slow, little effect on quality), and SSR is not supported.
MAIN 1
LOW 2
SSR 3
LTP 4
6. M/S encoding is supported, but if the fast quantize method is used (depending on your speed_level choice), M/S encoding is disabled.
7. band_width can be changed with the -w option.
8. There are 6 speed levels (1~6): 1 is the slowest but gives very good quality, 6 is the fastest but lower quality.
The default is 1 (strongly recommended).
Level 3 is a good choice if you want fast encoding; it is more than 2 times faster than speed_level 1.
9. Summary----
Want best quality (default) : use -l 1 (best quality, very good sound, smooth and stable)
Want normal quality and fast speed: use -l 3 (a good choice: normal quality and tolerable speed)
Want fast speed : use -l 5 (for rock music nothing abnormal is audible, but for quiet or piano audio it may not be good)
*/
uintptr_t fa_aacenc_init(int sample_rate, int bit_rate, int chn_num, float quality, int vbr_flag,
int mpeg_version, int aac_objtype, int lfe_enable,
float band_width,
int speed_level,
int time_resolution_first);
void fa_aacenc_uninit(uintptr_t handle);
//WARN: inlen must be 1024*chn_num*2 (2 means 2 bytes per sample, so your samples should be 16-bit short type).
void fa_aacenc_encode(uintptr_t handle, unsigned char *buf_in, int inlen, unsigned char *buf_out, int *outlen);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,159 @@
/*
falab - free algorithm lab
Copyright (C) 2012 luolongzhi (Chengdu, China)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
filename: fa_inttypes.h
version : v1.0.0
time : 2012/07/15 14:14
author : luolongzhi ( falab2012@gmail.com luolongzhi@gmail.com )
code URL: http://code.google.com/p/falab/
*/
#ifndef _FA_INTTYPES_H
#define _FA_INTTYPES_H
#ifdef __cplusplus
extern "C"
{
#endif
typedef char * caddr_t;
#ifdef WIN32
//typedef char int8_t;
typedef unsigned __int64 u_int64_t;
#else
typedef long long __int64;
typedef unsigned long long u_int64_t;
#endif
typedef unsigned char u_int8_t;
typedef short int16_t;
typedef unsigned short u_int16_t;
typedef int int32_t;
typedef unsigned int u_int32_t;
typedef __int64 int64_t;
typedef int32_t register_t;
typedef u_int8_t uint8_t;
typedef u_int16_t uint16_t;
typedef u_int32_t uint32_t;
typedef u_int64_t uint64_t;
#ifdef __GNUC__
#ifndef INT64_MAX
#define INT64_MAX 9223372036854775807LL
#endif
#else
#define INT64_MAX 9223372036854775807i64
#endif
#ifdef __GNUC__
#ifndef INT64_MIN
#define INT64_MIN (-9223372036854775807LL - 1)
#endif
#else
#define INT64_MIN (-9223372036854775807i64 - 1)
#endif
#ifndef INT8_MAX
#define INT8_MAX 127
#endif
#ifndef INT8_MIN
#define INT8_MIN (-127 - 1)
#endif
#ifndef INT16_MAX
#define INT16_MAX 32767
#endif
#ifndef INT16_MIN
#define INT16_MIN (-32767 - 1)
#endif
#ifndef INT32_MAX
#define INT32_MAX 2147483647
#endif
#ifndef INT32_MIN
#define INT32_MIN (-2147483647 - 1)
#endif
#ifndef UINT8_MAX
#define UINT8_MAX 0xff /* 255U */
#endif
#ifndef UINT16_MAX
#define UINT16_MAX 0xffff /* 65535U */
#endif
#ifndef UINT32_MAX
#define UINT32_MAX 0xffffffff /* 4294967295U */
#endif
#ifdef __GNUC__
#ifndef UINT64_MAX
#define UINT64_MAX 0xffffffffffffffffULL /* 18446744073709551615ULL */
#endif
#else
#define UINT64_MAX 0xffffffffffffffffui64 /* 18446744073709551615ULL */
#endif
typedef int intptr_t;
typedef unsigned uintptr_t;
#ifndef INT64_C
# if defined(__GNUC__)
# define INT64_C(c) (c ## LL)
# define UINT64_C(c) (c ## ULL)
# else
# define INT64_C(c) (c ## i64)
# define UINT64_C(c) (c ## ui64)
# endif
#endif
#ifndef offsetof
# define offsetof(T, F) ((unsigned int)((char *)&((T *)0)->F))
#endif
#define INTMAX_MIN (-INT64_C(9223372036854775807)-1)
#define INTMAX_MAX (INT64_C(9223372036854775807))
#define UINTMAX_MAX (UINT64_C(18446744073709551615))
#ifndef WIN32
#define BYTE unsigned char
#endif
//other function compatibility defines
#ifdef WIN32
#define inline __inline
#define snprintf _snprintf
#define vsnprintf _vsnprintf
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,36 @@
/*
* AC-3 parser prototypes
* Copyright (c) 2003 Fabrice Bellard
* Copyright (c) 2003 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_AC3_PARSER_H
#define AVCODEC_AC3_PARSER_H
#include <stddef.h>
#include <stdint.h>
/**
* Extract the bitstream ID and the frame size from AC-3 data.
*/
int av_ac3_parse_header(const uint8_t *buf, size_t size,
uint8_t *bitstream_id, uint16_t *frame_size);
#endif /* AVCODEC_AC3_PARSER_H */

View File

@@ -0,0 +1,37 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_ADTS_PARSER_H
#define AVCODEC_ADTS_PARSER_H
#include <stddef.h>
#include <stdint.h>
#define AV_AAC_ADTS_HEADER_SIZE 7
/**
* Extract the number of samples and frames from AAC data.
* @param[in] buf pointer to AAC data buffer
* @param[out] samples Pointer to where number of samples is written
* @param[out] frames Pointer to where number of frames is written
* @return Returns 0 on success, error code on failure.
*/
int av_adts_header_parse(const uint8_t *buf, uint32_t *samples,
uint8_t *frames);
#endif /* AVCODEC_ADTS_PARSER_H */

File diff suppressed because it is too large

View File

@@ -0,0 +1,84 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_AVDCT_H
#define AVCODEC_AVDCT_H
#include "libavutil/opt.h"
/**
* AVDCT context.
* @note function pointers can be NULL if the specific features have been
* disabled at build time.
*/
typedef struct AVDCT {
const AVClass *av_class;
void (*idct)(int16_t *block /* align 16 */);
/**
* IDCT input permutation.
* Several optimized IDCTs need a permutated input (relative to the
* normal order of the reference IDCT).
* This permutation must be performed before the idct_put/add.
* Note, normally this can be merged with the zigzag/alternate scan<br>
* An example to avoid confusion:
* - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...)
* - (x -> reference DCT -> reference IDCT -> x)
* - (x -> reference DCT -> simple_mmx_perm = idct_permutation
* -> simple_idct_mmx -> x)
* - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant
* -> simple_idct_mmx -> ...)
*/
uint8_t idct_permutation[64];
void (*fdct)(int16_t *block /* align 16 */);
/**
* DCT algorithm.
* must use AVOptions to set this field.
*/
int dct_algo;
/**
* IDCT algorithm.
* must use AVOptions to set this field.
*/
int idct_algo;
void (*get_pixels)(int16_t *block /* align 16 */,
const uint8_t *pixels /* align 8 */,
ptrdiff_t line_size);
int bits_per_sample;
} AVDCT;
/**
* Allocates an AVDCT context.
* This needs to be initialized with avcodec_dct_init() after optionally
* configuring it with AVOptions.
*
* To free it use av_free()
*/
AVDCT *avcodec_dct_alloc(void);
int avcodec_dct_init(AVDCT *);
const AVClass *avcodec_dct_get_class(void);
#endif /* AVCODEC_AVDCT_H */

View File

@@ -0,0 +1,118 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_AVFFT_H
#define AVCODEC_AVFFT_H
/**
* @file
* @ingroup lavc_fft
* FFT functions
*/
/**
* @defgroup lavc_fft FFT functions
* @ingroup lavc_misc
*
* @{
*/
typedef float FFTSample;
typedef struct FFTComplex {
FFTSample re, im;
} FFTComplex;
typedef struct FFTContext FFTContext;
/**
* Set up a complex FFT.
* @param nbits log2 of the length of the input array
* @param inverse if 0 perform the forward transform, if 1 perform the inverse
*/
FFTContext *av_fft_init(int nbits, int inverse);
/**
* Do the permutation needed BEFORE calling ff_fft_calc().
*/
void av_fft_permute(FFTContext *s, FFTComplex *z);
/**
* Do a complex FFT with the parameters defined in av_fft_init(). The
* input data must be permuted before. No 1.0/sqrt(n) normalization is done.
*/
void av_fft_calc(FFTContext *s, FFTComplex *z);
void av_fft_end(FFTContext *s);
FFTContext *av_mdct_init(int nbits, int inverse, double scale);
void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);
void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
void av_mdct_end(FFTContext *s);
/* Real Discrete Fourier Transform */
enum RDFTransformType {
DFT_R2C,
IDFT_C2R,
IDFT_R2C,
DFT_C2R,
};
typedef struct RDFTContext RDFTContext;
/**
* Set up a real FFT.
* @param nbits log2 of the length of the input array
* @param trans the type of transform
*/
RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);
void av_rdft_calc(RDFTContext *s, FFTSample *data);
void av_rdft_end(RDFTContext *s);
/* Discrete Cosine Transform */
typedef struct DCTContext DCTContext;
enum DCTTransformType {
DCT_II = 0,
DCT_III,
DCT_I,
DST_I,
};
/**
* Set up DCT.
*
* @param nbits size of the input array:
* (1 << nbits) for DCT-II, DCT-III and DST-I
* (1 << nbits) + 1 for DCT-I
* @param type the type of transform
*
* @note the first element of the input of DST-I is ignored
*/
DCTContext *av_dct_init(int nbits, enum DCTTransformType type);
void av_dct_calc(DCTContext *s, FFTSample *data);
void av_dct_end (DCTContext *s);
/**
* @}
*/
#endif /* AVCODEC_AVFFT_H */

View File

@@ -0,0 +1,112 @@
/*
* Direct3D11 HW acceleration
*
* copyright (c) 2009 Laurent Aimar
* copyright (c) 2015 Steve Lhomme
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_D3D11VA_H
#define AVCODEC_D3D11VA_H
/**
* @file
* @ingroup lavc_codec_hwaccel_d3d11va
* Public libavcodec D3D11VA header.
*/
#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0602
#endif
#include <stdint.h>
#include <d3d11.h>
/**
* @defgroup lavc_codec_hwaccel_d3d11va Direct3D11
* @ingroup lavc_codec_hwaccel
*
* @{
*/
#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for Direct3D11 and old UVD/UVD+ ATI video cards
#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for Direct3D11 and old Intel GPUs with ClearVideo interface
/**
* This structure is used to provide the necessary configurations and data
* to the Direct3D11 FFmpeg HWAccel implementation.
*
* The application must make it available as AVCodecContext.hwaccel_context.
*
* Use av_d3d11va_alloc_context() exclusively to allocate an AVD3D11VAContext.
*/
typedef struct AVD3D11VAContext {
/**
* D3D11 decoder object
*/
ID3D11VideoDecoder *decoder;
/**
* D3D11 VideoContext
*/
ID3D11VideoContext *video_context;
/**
* D3D11 configuration used to create the decoder
*/
D3D11_VIDEO_DECODER_CONFIG *cfg;
/**
* The number of surface in the surface array
*/
unsigned surface_count;
/**
* The array of Direct3D surfaces used to create the decoder
*/
ID3D11VideoDecoderOutputView **surface;
/**
* A bit field configuring the workarounds needed for using the decoder
*/
uint64_t workaround;
/**
* Private to the FFmpeg AVHWAccel implementation
*/
unsigned report_id;
/**
* Mutex to access video_context
*/
HANDLE context_mutex;
} AVD3D11VAContext;
/**
* Allocate an AVD3D11VAContext.
*
* @return Newly-allocated AVD3D11VAContext or NULL on failure.
*/
AVD3D11VAContext *av_d3d11va_alloc_context(void);
/**
* @}
*/
#endif /* AVCODEC_D3D11VA_H */

View File

@@ -0,0 +1,131 @@
/*
* Copyright (C) 2007 Marco Gerards <marco@gnu.org>
* Copyright (C) 2009 David Conrad
* Copyright (C) 2011 Jordi Ortiz
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DIRAC_H
#define AVCODEC_DIRAC_H
/**
* @file
* Interface to Dirac Decoder/Encoder
* @author Marco Gerards <marco@gnu.org>
* @author David Conrad
* @author Jordi Ortiz
*/
#include "avcodec.h"
/**
* The spec limits the number of wavelet decompositions to 4 for both
* level 1 (VC-2) and 128 (long-gop default).
* 5 decompositions is the maximum before >16-bit buffers are needed.
* Schroedinger allows this for DD 9,7 and 13,7 wavelets only, limiting
* the others to 4 decompositions (or 3 for the fidelity filter).
*
* We use this instead of MAX_DECOMPOSITIONS to save some memory.
*/
#define MAX_DWT_LEVELS 5
/**
* Parse code values:
*
* Dirac Specification ->
* 9.6.1 Table 9.1
*
* VC-2 Specification ->
* 10.4.1 Table 10.1
*/
enum DiracParseCodes {
DIRAC_PCODE_SEQ_HEADER = 0x00,
DIRAC_PCODE_END_SEQ = 0x10,
DIRAC_PCODE_AUX = 0x20,
DIRAC_PCODE_PAD = 0x30,
DIRAC_PCODE_PICTURE_CODED = 0x08,
DIRAC_PCODE_PICTURE_RAW = 0x48,
DIRAC_PCODE_PICTURE_LOW_DEL = 0xC8,
DIRAC_PCODE_PICTURE_HQ = 0xE8,
DIRAC_PCODE_INTER_NOREF_CO1 = 0x0A,
DIRAC_PCODE_INTER_NOREF_CO2 = 0x09,
DIRAC_PCODE_INTER_REF_CO1 = 0x0D,
DIRAC_PCODE_INTER_REF_CO2 = 0x0E,
DIRAC_PCODE_INTRA_REF_CO = 0x0C,
DIRAC_PCODE_INTRA_REF_RAW = 0x4C,
DIRAC_PCODE_INTRA_REF_PICT = 0xCC,
DIRAC_PCODE_MAGIC = 0x42424344,
};
typedef struct DiracVersionInfo {
int major;
int minor;
} DiracVersionInfo;
typedef struct AVDiracSeqHeader {
unsigned width;
unsigned height;
uint8_t chroma_format; ///< 0: 444 1: 422 2: 420
uint8_t interlaced;
uint8_t top_field_first;
uint8_t frame_rate_index; ///< index into dirac_frame_rate[]
uint8_t aspect_ratio_index; ///< index into dirac_aspect_ratio[]
uint16_t clean_width;
uint16_t clean_height;
uint16_t clean_left_offset;
uint16_t clean_right_offset;
uint8_t pixel_range_index; ///< index into dirac_pixel_range_presets[]
uint8_t color_spec_index; ///< index into dirac_color_spec_presets[]
int profile;
int level;
AVRational framerate;
AVRational sample_aspect_ratio;
enum AVPixelFormat pix_fmt;
enum AVColorRange color_range;
enum AVColorPrimaries color_primaries;
enum AVColorTransferCharacteristic color_trc;
enum AVColorSpace colorspace;
DiracVersionInfo version;
int bit_depth;
} AVDiracSeqHeader;
/**
* Parse a Dirac sequence header.
*
* @param dsh this function will allocate and fill an AVDiracSeqHeader struct
* and write it into this pointer. The caller must free it with
* av_free().
* @param buf the data buffer
* @param buf_size the size of the data buffer in bytes
* @param log_ctx if non-NULL, this function will log errors here
* @return 0 on success, a negative AVERROR code on failure
*/
int av_dirac_parse_sequence_header(AVDiracSeqHeader **dsh,
const uint8_t *buf, size_t buf_size,
void *log_ctx);
#endif /* AVCODEC_DIRAC_H */

View File

@@ -0,0 +1,83 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DV_PROFILE_H
#define AVCODEC_DV_PROFILE_H
#include <stdint.h>
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "avcodec.h"
/* minimum number of bytes to read from a DV stream in order to
* determine the profile */
#define DV_PROFILE_BYTES (6 * 80) /* 6 DIF blocks */
/*
* AVDVProfile is used to express the differences between various
* DV flavors. For now it's primarily used for differentiating
* 525/60 and 625/50, but the plans are to use it for various
* DV specs as well (e.g. SMPTE314M vs. IEC 61834).
*/
typedef struct AVDVProfile {
int dsf; /* value of the dsf in the DV header */
int video_stype; /* stype for VAUX source pack */
int frame_size; /* total size of one frame in bytes */
int difseg_size; /* number of DIF segments per DIF channel */
int n_difchan; /* number of DIF channels per frame */
AVRational time_base; /* 1/framerate */
int ltc_divisor; /* FPS from the LTS standpoint */
int height; /* picture height in pixels */
int width; /* picture width in pixels */
AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */
enum AVPixelFormat pix_fmt; /* picture pixel format */
int bpm; /* blocks per macroblock */
const uint8_t *block_sizes; /* AC block sizes, in bits */
int audio_stride; /* size of audio_shuffle table */
int audio_min_samples[3]; /* min amount of audio samples */
/* for 48kHz, 44.1kHz and 32kHz */
int audio_samples_dist[5]; /* how many samples are supposed to be */
/* in each frame in a 5 frames window */
const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */
} AVDVProfile;
/**
* Get a DV profile for the provided compressed frame.
*
* @param sys the profile used for the previous frame, may be NULL
* @param frame the compressed data buffer
* @param buf_size size of the buffer in bytes
* @return the DV profile for the supplied data or NULL on failure
*/
const AVDVProfile *av_dv_frame_profile(const AVDVProfile *sys,
const uint8_t *frame, unsigned buf_size);
/**
* Get a DV profile for the provided stream parameters.
*/
const AVDVProfile *av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt);
/**
* Get a DV profile for the provided stream parameters.
* The frame rate is used as a best-effort parameter.
*/
const AVDVProfile *av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate);
#endif /* AVCODEC_DV_PROFILE_H */

View File

@@ -0,0 +1,93 @@
/*
* DXVA2 HW acceleration
*
* copyright (c) 2009 Laurent Aimar
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DXVA2_H
#define AVCODEC_DXVA2_H
/**
* @file
* @ingroup lavc_codec_hwaccel_dxva2
* Public libavcodec DXVA2 header.
*/
#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0602
#endif
#include <stdint.h>
#include <d3d9.h>
#include <dxva2api.h>
/**
* @defgroup lavc_codec_hwaccel_dxva2 DXVA2
* @ingroup lavc_codec_hwaccel
*
* @{
*/
#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards
#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for DXVA2 and old Intel GPUs with ClearVideo interface
/**
* This structure is used to provide the necessary configurations and data
* to the DXVA2 FFmpeg HWAccel implementation.
*
* The application must make it available as AVCodecContext.hwaccel_context.
*/
struct dxva_context {
/**
* DXVA2 decoder object
*/
IDirectXVideoDecoder *decoder;
/**
* DXVA2 configuration used to create the decoder
*/
const DXVA2_ConfigPictureDecode *cfg;
/**
* The number of surface in the surface array
*/
unsigned surface_count;
/**
* The array of Direct3D surfaces used to create the decoder
*/
LPDIRECT3DSURFACE9 *surface;
/**
* A bit field configuring the workarounds needed for using the decoder
*/
uint64_t workaround;
/**
* Private to the FFmpeg AVHWAccel implementation
*/
unsigned report_id;
};
/**
* @}
*/
#endif /* AVCODEC_DXVA2_H */

View File

@@ -0,0 +1,46 @@
/*
* JNI public API functions
*
* Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_JNI_H
#define AVCODEC_JNI_H
/*
* Manually set a Java virtual machine which will be used to retrieve the JNI
* environment. Once a Java VM is set it cannot be changed afterwards, meaning
* you can call multiple times av_jni_set_java_vm with the same Java VM pointer
* however it will error out if you try to set a different Java VM.
*
* @param vm Java virtual machine
* @param log_ctx context used for logging, can be NULL
* @return 0 on success, < 0 otherwise
*/
int av_jni_set_java_vm(void *vm, void *log_ctx);
/*
* Get the Java virtual machine which has been set with av_jni_set_java_vm.
*
* @param vm Java virtual machine
* @return a pointer to the Java virtual machine
*/
void *av_jni_get_java_vm(void *log_ctx);
#endif /* AVCODEC_JNI_H */

View File

@@ -0,0 +1,101 @@
/*
* Android MediaCodec public API
*
* Copyright (c) 2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_MEDIACODEC_H
#define AVCODEC_MEDIACODEC_H
#include "libavcodec/avcodec.h"
/**
* This structure holds a reference to a android/view/Surface object that will
* be used as output by the decoder.
*
*/
typedef struct AVMediaCodecContext {
/**
* android/view/Surface object reference.
*/
void *surface;
} AVMediaCodecContext;
/**
* Allocate and initialize a MediaCodec context.
*
* When decoding with MediaCodec is finished, the caller must free the
* MediaCodec context with av_mediacodec_default_free.
*
* @return a pointer to a newly allocated AVMediaCodecContext on success, NULL otherwise
*/
AVMediaCodecContext *av_mediacodec_alloc_context(void);
/**
* Convenience function that sets up the MediaCodec context.
*
* @param avctx codec context
* @param ctx MediaCodec context to initialize
* @param surface reference to an android/view/Surface
* @return 0 on success, < 0 otherwise
*/
int av_mediacodec_default_init(AVCodecContext *avctx, AVMediaCodecContext *ctx, void *surface);
/**
* This function must be called to free the MediaCodec context initialized with
* av_mediacodec_default_init().
*
* @param avctx codec context
*/
void av_mediacodec_default_free(AVCodecContext *avctx);
/**
* Opaque structure representing a MediaCodec buffer to render.
*/
typedef struct MediaCodecBuffer AVMediaCodecBuffer;
/**
* Release a MediaCodec buffer and render it to the surface that is associated
* with the decoder. This function should only be called once on a given
* buffer, once released the underlying buffer returns to the codec, thus
* subsequent calls to this function will have no effect.
*
* @param buffer the buffer to render
* @param render 1 to release and render the buffer to the surface or 0 to
* discard the buffer
* @return 0 on success, < 0 otherwise
*/
int av_mediacodec_release_buffer(AVMediaCodecBuffer *buffer, int render);
/**
* Release a MediaCodec buffer and render it at the given time to the surface
* that is associated with the decoder. The timestamp must be within one second
* of the current java/lang/System#nanoTime() (which is implemented using
* CLOCK_MONOTONIC on Android). See the Android MediaCodec documentation
* of android/media/MediaCodec#releaseOutputBuffer(int,long) for more details.
*
* @param buffer the buffer to render
* @param time timestamp in nanoseconds of when to render the buffer
* @return 0 on success, < 0 otherwise
*/
int av_mediacodec_render_buffer_at_time(AVMediaCodecBuffer *buffer, int64_t time);
#endif /* AVCODEC_MEDIACODEC_H */

View File

@@ -0,0 +1,397 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_OLD_CODEC_IDS_H
#define AVCODEC_OLD_CODEC_IDS_H
/*
* This header exists to prevent new codec IDs from being accidentally added to
* the deprecated list.
* Do not include it directly. It will be removed on next major bump
*
* Do not add new items to this list. Use the AVCodecID enum instead.
*/
CODEC_ID_NONE = AV_CODEC_ID_NONE,
/* video codecs */
CODEC_ID_MPEG1VIDEO,
CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
#if FF_API_XVMC
CODEC_ID_MPEG2VIDEO_XVMC,
#endif
CODEC_ID_H261,
CODEC_ID_H263,
CODEC_ID_RV10,
CODEC_ID_RV20,
CODEC_ID_MJPEG,
CODEC_ID_MJPEGB,
CODEC_ID_LJPEG,
CODEC_ID_SP5X,
CODEC_ID_JPEGLS,
CODEC_ID_MPEG4,
CODEC_ID_RAWVIDEO,
CODEC_ID_MSMPEG4V1,
CODEC_ID_MSMPEG4V2,
CODEC_ID_MSMPEG4V3,
CODEC_ID_WMV1,
CODEC_ID_WMV2,
CODEC_ID_H263P,
CODEC_ID_H263I,
CODEC_ID_FLV1,
CODEC_ID_SVQ1,
CODEC_ID_SVQ3,
CODEC_ID_DVVIDEO,
CODEC_ID_HUFFYUV,
CODEC_ID_CYUV,
CODEC_ID_H264,
CODEC_ID_INDEO3,
CODEC_ID_VP3,
CODEC_ID_THEORA,
CODEC_ID_ASV1,
CODEC_ID_ASV2,
CODEC_ID_FFV1,
CODEC_ID_4XM,
CODEC_ID_VCR1,
CODEC_ID_CLJR,
CODEC_ID_MDEC,
CODEC_ID_ROQ,
CODEC_ID_INTERPLAY_VIDEO,
CODEC_ID_XAN_WC3,
CODEC_ID_XAN_WC4,
CODEC_ID_RPZA,
CODEC_ID_CINEPAK,
CODEC_ID_WS_VQA,
CODEC_ID_MSRLE,
CODEC_ID_MSVIDEO1,
CODEC_ID_IDCIN,
CODEC_ID_8BPS,
CODEC_ID_SMC,
CODEC_ID_FLIC,
CODEC_ID_TRUEMOTION1,
CODEC_ID_VMDVIDEO,
CODEC_ID_MSZH,
CODEC_ID_ZLIB,
CODEC_ID_QTRLE,
CODEC_ID_TSCC,
CODEC_ID_ULTI,
CODEC_ID_QDRAW,
CODEC_ID_VIXL,
CODEC_ID_QPEG,
CODEC_ID_PNG,
CODEC_ID_PPM,
CODEC_ID_PBM,
CODEC_ID_PGM,
CODEC_ID_PGMYUV,
CODEC_ID_PAM,
CODEC_ID_FFVHUFF,
CODEC_ID_RV30,
CODEC_ID_RV40,
CODEC_ID_VC1,
CODEC_ID_WMV3,
CODEC_ID_LOCO,
CODEC_ID_WNV1,
CODEC_ID_AASC,
CODEC_ID_INDEO2,
CODEC_ID_FRAPS,
CODEC_ID_TRUEMOTION2,
CODEC_ID_BMP,
CODEC_ID_CSCD,
CODEC_ID_MMVIDEO,
CODEC_ID_ZMBV,
CODEC_ID_AVS,
CODEC_ID_SMACKVIDEO,
CODEC_ID_NUV,
CODEC_ID_KMVC,
CODEC_ID_FLASHSV,
CODEC_ID_CAVS,
CODEC_ID_JPEG2000,
CODEC_ID_VMNC,
CODEC_ID_VP5,
CODEC_ID_VP6,
CODEC_ID_VP6F,
CODEC_ID_TARGA,
CODEC_ID_DSICINVIDEO,
CODEC_ID_TIERTEXSEQVIDEO,
CODEC_ID_TIFF,
CODEC_ID_GIF,
CODEC_ID_DXA,
CODEC_ID_DNXHD,
CODEC_ID_THP,
CODEC_ID_SGI,
CODEC_ID_C93,
CODEC_ID_BETHSOFTVID,
CODEC_ID_PTX,
CODEC_ID_TXD,
CODEC_ID_VP6A,
CODEC_ID_AMV,
CODEC_ID_VB,
CODEC_ID_PCX,
CODEC_ID_SUNRAST,
CODEC_ID_INDEO4,
CODEC_ID_INDEO5,
CODEC_ID_MIMIC,
CODEC_ID_RL2,
CODEC_ID_ESCAPE124,
CODEC_ID_DIRAC,
CODEC_ID_BFI,
CODEC_ID_CMV,
CODEC_ID_MOTIONPIXELS,
CODEC_ID_TGV,
CODEC_ID_TGQ,
CODEC_ID_TQI,
CODEC_ID_AURA,
CODEC_ID_AURA2,
CODEC_ID_V210X,
CODEC_ID_TMV,
CODEC_ID_V210,
CODEC_ID_DPX,
CODEC_ID_MAD,
CODEC_ID_FRWU,
CODEC_ID_FLASHSV2,
CODEC_ID_CDGRAPHICS,
CODEC_ID_R210,
CODEC_ID_ANM,
CODEC_ID_BINKVIDEO,
CODEC_ID_IFF_ILBM,
CODEC_ID_IFF_BYTERUN1,
CODEC_ID_KGV1,
CODEC_ID_YOP,
CODEC_ID_VP8,
CODEC_ID_PICTOR,
CODEC_ID_ANSI,
CODEC_ID_A64_MULTI,
CODEC_ID_A64_MULTI5,
CODEC_ID_R10K,
CODEC_ID_MXPEG,
CODEC_ID_LAGARITH,
CODEC_ID_PRORES,
CODEC_ID_JV,
CODEC_ID_DFA,
CODEC_ID_WMV3IMAGE,
CODEC_ID_VC1IMAGE,
CODEC_ID_UTVIDEO,
CODEC_ID_BMV_VIDEO,
CODEC_ID_VBLE,
CODEC_ID_DXTORY,
CODEC_ID_V410,
CODEC_ID_XWD,
CODEC_ID_CDXL,
CODEC_ID_XBM,
CODEC_ID_ZEROCODEC,
CODEC_ID_MSS1,
CODEC_ID_MSA1,
CODEC_ID_TSCC2,
CODEC_ID_MTS2,
CODEC_ID_CLLC,
CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),
CODEC_ID_EXR = MKBETAG('0','E','X','R'),
CODEC_ID_AVRP = MKBETAG('A','V','R','P'),
CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'),
CODEC_ID_AVUI = MKBETAG('A','V','U','I'),
CODEC_ID_AYUV = MKBETAG('A','Y','U','V'),
CODEC_ID_V308 = MKBETAG('V','3','0','8'),
CODEC_ID_V408 = MKBETAG('V','4','0','8'),
CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'),
CODEC_ID_SANM = MKBETAG('S','A','N','M'),
CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'),
CODEC_ID_SNOW = AV_CODEC_ID_SNOW,
/* various PCM "codecs" */
CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
CODEC_ID_PCM_S16LE = 0x10000,
CODEC_ID_PCM_S16BE,
CODEC_ID_PCM_U16LE,
CODEC_ID_PCM_U16BE,
CODEC_ID_PCM_S8,
CODEC_ID_PCM_U8,
CODEC_ID_PCM_MULAW,
CODEC_ID_PCM_ALAW,
CODEC_ID_PCM_S32LE,
CODEC_ID_PCM_S32BE,
CODEC_ID_PCM_U32LE,
CODEC_ID_PCM_U32BE,
CODEC_ID_PCM_S24LE,
CODEC_ID_PCM_S24BE,
CODEC_ID_PCM_U24LE,
CODEC_ID_PCM_U24BE,
CODEC_ID_PCM_S24DAUD,
CODEC_ID_PCM_ZORK,
CODEC_ID_PCM_S16LE_PLANAR,
CODEC_ID_PCM_DVD,
CODEC_ID_PCM_F32BE,
CODEC_ID_PCM_F32LE,
CODEC_ID_PCM_F64BE,
CODEC_ID_PCM_F64LE,
CODEC_ID_PCM_BLURAY,
CODEC_ID_PCM_LXF,
CODEC_ID_S302M,
CODEC_ID_PCM_S8_PLANAR,
/* various ADPCM codecs */
CODEC_ID_ADPCM_IMA_QT = 0x11000,
CODEC_ID_ADPCM_IMA_WAV,
CODEC_ID_ADPCM_IMA_DK3,
CODEC_ID_ADPCM_IMA_DK4,
CODEC_ID_ADPCM_IMA_WS,
CODEC_ID_ADPCM_IMA_SMJPEG,
CODEC_ID_ADPCM_MS,
CODEC_ID_ADPCM_4XM,
CODEC_ID_ADPCM_XA,
CODEC_ID_ADPCM_ADX,
CODEC_ID_ADPCM_EA,
CODEC_ID_ADPCM_G726,
CODEC_ID_ADPCM_CT,
CODEC_ID_ADPCM_SWF,
CODEC_ID_ADPCM_YAMAHA,
CODEC_ID_ADPCM_SBPRO_4,
CODEC_ID_ADPCM_SBPRO_3,
CODEC_ID_ADPCM_SBPRO_2,
CODEC_ID_ADPCM_THP,
CODEC_ID_ADPCM_IMA_AMV,
CODEC_ID_ADPCM_EA_R1,
CODEC_ID_ADPCM_EA_R3,
CODEC_ID_ADPCM_EA_R2,
CODEC_ID_ADPCM_IMA_EA_SEAD,
CODEC_ID_ADPCM_IMA_EA_EACS,
CODEC_ID_ADPCM_EA_XAS,
CODEC_ID_ADPCM_EA_MAXIS_XA,
CODEC_ID_ADPCM_IMA_ISS,
CODEC_ID_ADPCM_G722,
CODEC_ID_ADPCM_IMA_APC,
CODEC_ID_VIMA = MKBETAG('V','I','M','A'),
/* AMR */
CODEC_ID_AMR_NB = 0x12000,
CODEC_ID_AMR_WB,
/* RealAudio codecs*/
CODEC_ID_RA_144 = 0x13000,
CODEC_ID_RA_288,
/* various DPCM codecs */
CODEC_ID_ROQ_DPCM = 0x14000,
CODEC_ID_INTERPLAY_DPCM,
CODEC_ID_XAN_DPCM,
CODEC_ID_SOL_DPCM,
/* audio codecs */
CODEC_ID_MP2 = 0x15000,
CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
CODEC_ID_AAC,
CODEC_ID_AC3,
CODEC_ID_DTS,
CODEC_ID_VORBIS,
CODEC_ID_DVAUDIO,
CODEC_ID_WMAV1,
CODEC_ID_WMAV2,
CODEC_ID_MACE3,
CODEC_ID_MACE6,
CODEC_ID_VMDAUDIO,
CODEC_ID_FLAC,
CODEC_ID_MP3ADU,
CODEC_ID_MP3ON4,
CODEC_ID_SHORTEN,
CODEC_ID_ALAC,
CODEC_ID_WESTWOOD_SND1,
CODEC_ID_GSM, ///< as in Berlin toast format
CODEC_ID_QDM2,
CODEC_ID_COOK,
CODEC_ID_TRUESPEECH,
CODEC_ID_TTA,
CODEC_ID_SMACKAUDIO,
CODEC_ID_QCELP,
CODEC_ID_WAVPACK,
CODEC_ID_DSICINAUDIO,
CODEC_ID_IMC,
CODEC_ID_MUSEPACK7,
CODEC_ID_MLP,
CODEC_ID_GSM_MS, /* as found in WAV */
CODEC_ID_ATRAC3,
CODEC_ID_VOXWARE,
CODEC_ID_APE,
CODEC_ID_NELLYMOSER,
CODEC_ID_MUSEPACK8,
CODEC_ID_SPEEX,
CODEC_ID_WMAVOICE,
CODEC_ID_WMAPRO,
CODEC_ID_WMALOSSLESS,
CODEC_ID_ATRAC3P,
CODEC_ID_EAC3,
CODEC_ID_SIPR,
CODEC_ID_MP1,
CODEC_ID_TWINVQ,
CODEC_ID_TRUEHD,
CODEC_ID_MP4ALS,
CODEC_ID_ATRAC1,
CODEC_ID_BINKAUDIO_RDFT,
CODEC_ID_BINKAUDIO_DCT,
CODEC_ID_AAC_LATM,
CODEC_ID_QDMC,
CODEC_ID_CELT,
CODEC_ID_G723_1,
CODEC_ID_G729,
CODEC_ID_8SVX_EXP,
CODEC_ID_8SVX_FIB,
CODEC_ID_BMV_AUDIO,
CODEC_ID_RALF,
CODEC_ID_IAC,
CODEC_ID_ILBC,
CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'),
CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'),
CODEC_ID_OPUS = MKBETAG('O','P','U','S'),
/* subtitle codecs */
CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
CODEC_ID_DVD_SUBTITLE = 0x17000,
CODEC_ID_DVB_SUBTITLE,
CODEC_ID_TEXT, ///< raw UTF-8 text
CODEC_ID_XSUB,
CODEC_ID_SSA,
CODEC_ID_MOV_TEXT,
CODEC_ID_HDMV_PGS_SUBTITLE,
CODEC_ID_DVB_TELETEXT,
CODEC_ID_SRT,
CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'),
CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'),
CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'),
CODEC_ID_SAMI = MKBETAG('S','A','M','I'),
CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'),
CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'),
/* other specific kind of codecs (generally used for attachments) */
CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
CODEC_ID_TTF = 0x18000,
CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'),
CODEC_ID_XBIN = MKBETAG('X','B','I','N'),
CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'),
CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'),
CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it
CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
* stream (only used by libavformat) */
CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
* stream (only used by libavformat) */
CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
#endif /* AVCODEC_OLD_CODEC_IDS_H */
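A minimal sketch (not part of the header above) of how the range markers CODEC_ID_FIRST_AUDIO, CODEC_ID_FIRST_SUBTITLE and CODEC_ID_FIRST_UNKNOWN can classify a legacy CODEC_ID_* value. The MKBETAG()-based entries fall outside these numeric ranges, so this heuristic only covers the sequentially assigned IDs.

/* illustrative helper, assumes old_codec_ids.h has been included */
static const char *old_codec_id_kind(unsigned id)
{
    if (id >= CODEC_ID_FIRST_UNKNOWN)  return "attachment/other";
    if (id >= CODEC_ID_FIRST_SUBTITLE) return "subtitle";
    if (id >= CODEC_ID_FIRST_AUDIO)    return "audio";
    if (id != CODEC_ID_NONE)           return "video";
    return "none";
}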

View File

@ -0,0 +1,107 @@
/*
* Intel MediaSDK QSV public API
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_QSV_H
#define AVCODEC_QSV_H
#include <mfx/mfxvideo.h>
#include "libavutil/buffer.h"
/**
* This struct is used for communicating QSV parameters between libavcodec and
* the caller. It is managed by the caller and must be assigned to
* AVCodecContext.hwaccel_context.
* - decoding: hwaccel_context must be set on return from the get_format()
* callback
* - encoding: hwaccel_context must be set before avcodec_open2()
*/
typedef struct AVQSVContext {
/**
* If non-NULL, the session to use for encoding or decoding.
* Otherwise, libavcodec will try to create an internal session.
*/
mfxSession session;
/**
* The IO pattern to use.
*/
int iopattern;
/**
* Extra buffers to pass to encoder or decoder initialization.
*/
mfxExtBuffer **ext_buffers;
int nb_ext_buffers;
/**
* Encoding only. If this field is set to non-zero by the caller, libavcodec
* will create an mfxExtOpaqueSurfaceAlloc extended buffer and pass it to
* the encoder initialization. This only makes sense if iopattern is also
* set to MFX_IOPATTERN_IN_OPAQUE_MEMORY.
*
* The number of allocated opaque surfaces will be the sum of the number
* required by the encoder and the user-provided value nb_opaque_surfaces.
* The array of the opaque surfaces will be exported to the caller through
* the opaque_surfaces field.
*/
int opaque_alloc;
/**
* Encoding only, and only if opaque_alloc is set to non-zero. Before
* calling avcodec_open2(), the caller should set this field to the number
* of extra opaque surfaces to allocate beyond what is required by the
* encoder.
*
* On return from avcodec_open2(), this field will be set by libavcodec to
* the total number of allocated opaque surfaces.
*/
int nb_opaque_surfaces;
/**
* Encoding only, and only if opaque_alloc is set to non-zero. On return
* from avcodec_open2(), this field will be used by libavcodec to export the
* array of the allocated opaque surfaces to the caller, so they can be
* passed to other parts of the pipeline.
*
* The buffer reference exported here is owned and managed by libavcodec,
* the callers should make their own reference with av_buffer_ref() and free
* it with av_buffer_unref() when it is no longer needed.
*
* The buffer data is an nb_opaque_surfaces-sized array of mfxFrameSurface1.
*/
AVBufferRef *opaque_surfaces;
/**
* Encoding only, and only if opaque_alloc is set to non-zero. On return
* from avcodec_open2(), this field will be set to the surface type used in
* the opaque allocation request.
*/
int opaque_alloc_type;
} AVQSVContext;
/**
* Allocate a new context.
*
* It must be freed by the caller with av_free().
*/
AVQSVContext *av_qsv_alloc_context(void);
#endif /* AVCODEC_QSV_H */
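A minimal usage sketch for the QSV context described above, assuming a decoder AVCodecContext is being prepared for a QSV-capable codec and the Media SDK headers are available; MFX_IOPATTERN_OUT_SYSTEM_MEMORY is just one possible IO pattern, chosen here for illustration.

#include <mfx/mfxvideo.h>
#include <libavcodec/avcodec.h>
#include <libavcodec/qsv.h>

static int attach_qsv_hwaccel(AVCodecContext *avctx)
{
    AVQSVContext *qsv = av_qsv_alloc_context();
    if (!qsv)
        return AVERROR(ENOMEM);

    /* session left at NULL: libavcodec will create an internal one */
    qsv->iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    avctx->hwaccel_context = qsv;   /* must be in place before decoding starts */
    return 0;
}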

View File

@ -0,0 +1,86 @@
/*
* Video Acceleration API (shared data between FFmpeg and the video player)
* HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
*
* Copyright (C) 2008-2009 Splitted-Desktop Systems
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VAAPI_H
#define AVCODEC_VAAPI_H
/**
* @file
* @ingroup lavc_codec_hwaccel_vaapi
* Public libavcodec VA API header.
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "version.h"
#if FF_API_STRUCT_VAAPI_CONTEXT
/**
* @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
* @ingroup lavc_codec_hwaccel
* @{
*/
/**
* This structure is used to share data between the FFmpeg library and
* the client video application.
* This shall be zero-allocated and available as
* AVCodecContext.hwaccel_context. All user members can be set once
* during initialization or through each AVCodecContext.get_buffer()
* function call. In any case, they must be valid prior to calling
* decoding functions.
*
* Deprecated: use AVCodecContext.hw_frames_ctx instead.
*/
struct attribute_deprecated vaapi_context {
/**
* Window system dependent data
*
* - encoding: unused
* - decoding: Set by user
*/
void *display;
/**
* Configuration ID
*
* - encoding: unused
* - decoding: Set by user
*/
uint32_t config_id;
/**
* Context ID (video decode pipeline)
*
* - encoding: unused
* - decoding: Set by user
*/
uint32_t context_id;
};
/* @} */
#endif /* FF_API_STRUCT_VAAPI_CONTEXT */
#endif /* AVCODEC_VAAPI_H */
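A minimal sketch of the legacy usage described above, assuming a VADisplay, VAConfigID and VAContextID were already created through libva; as the deprecation note says, new code should prefer AVCodecContext.hw_frames_ctx instead.

#include <stdlib.h>
#include <libavcodec/avcodec.h>
#include <libavcodec/vaapi.h>

static int attach_vaapi_hwaccel(AVCodecContext *avctx, void *va_display,
                                uint32_t config_id, uint32_t context_id)
{
    /* the struct "shall be zero-allocated", hence calloc() */
    struct vaapi_context *vactx = calloc(1, sizeof(*vactx));
    if (!vactx)
        return AVERROR(ENOMEM);

    vactx->display    = va_display;
    vactx->config_id  = config_id;
    vactx->context_id = context_id;
    avctx->hwaccel_context = vactx;  /* freed by the caller once decoding ends */
    return 0;
}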

View File

@ -0,0 +1,213 @@
/*
* VDA HW acceleration
*
* copyright (c) 2011 Sebastien Zwickert
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VDA_H
#define AVCODEC_VDA_H
/**
* @file
* @ingroup lavc_codec_hwaccel_vda
* Public libavcodec VDA header.
*/
#include "libavcodec/avcodec.h"
#include <stdint.h>
// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes
// http://openradar.appspot.com/8026390
#undef __GNUC_STDC_INLINE__
#define Picture QuickdrawPicture
#include <VideoDecodeAcceleration/VDADecoder.h>
#undef Picture
#include "libavcodec/version.h"
// extra flags not defined in VDADecoder.h
enum {
kVDADecodeInfo_Asynchronous = 1UL << 0,
kVDADecodeInfo_FrameDropped = 1UL << 1
};
/**
* @defgroup lavc_codec_hwaccel_vda VDA
* @ingroup lavc_codec_hwaccel
*
* @{
*/
/**
* This structure is used to provide the necessary configurations and data
* to the VDA FFmpeg HWAccel implementation.
*
* The application must make it available as AVCodecContext.hwaccel_context.
*/
struct vda_context {
/**
* VDA decoder object.
*
* - encoding: unused
* - decoding: Set/Unset by libavcodec.
*/
VDADecoder decoder;
/**
* The Core Video pixel buffer that contains the current image data.
*
* encoding: unused
* decoding: Set by libavcodec. Unset by user.
*/
CVPixelBufferRef cv_buffer;
/**
* Use the hardware decoder in synchronous mode.
*
* encoding: unused
* decoding: Set by user.
*/
int use_sync_decoding;
/**
* The frame width.
*
* - encoding: unused
* - decoding: Set/Unset by user.
*/
int width;
/**
* The frame height.
*
* - encoding: unused
* - decoding: Set/Unset by user.
*/
int height;
/**
* The frame format.
*
* - encoding: unused
* - decoding: Set/Unset by user.
*/
int format;
/**
* The pixel format for output image buffers.
*
* - encoding: unused
* - decoding: Set/Unset by user.
*/
OSType cv_pix_fmt_type;
/**
* unused
*/
uint8_t *priv_bitstream;
/**
* unused
*/
int priv_bitstream_size;
/**
* unused
*/
int priv_allocated_size;
/**
* Use av_buffer to manage buffer.
* When the flag is set, the CVPixelBuffers returned by the decoder will
* be released automatically, so you have to retain them if necessary.
* Not setting this flag may cause a memory leak.
*
* encoding: unused
* decoding: Set by user.
*/
int use_ref_buffer;
};
/** Create the video decoder. */
int ff_vda_create_decoder(struct vda_context *vda_ctx,
uint8_t *extradata,
int extradata_size);
/** Destroy the video decoder. */
int ff_vda_destroy_decoder(struct vda_context *vda_ctx);
/**
* This struct holds all the information that needs to be passed
* between the caller and libavcodec for initializing VDA decoding.
* Its size is not a part of the public ABI, it must be allocated with
* av_vda_alloc_context() and freed with av_free().
*/
typedef struct AVVDAContext {
/**
* VDA decoder object. Created and freed by the caller.
*/
VDADecoder decoder;
/**
* The output callback that must be passed to VDADecoderCreate.
* Set by av_vda_alloc_context().
*/
VDADecoderOutputCallback output_callback;
} AVVDAContext;
/**
* Allocate and initialize a VDA context.
*
* This function should be called from the get_format() callback when the caller
* selects the AV_PIX_FMT_VDA format. The caller must then create the decoder
* object (using the output callback provided by libavcodec) that will be used
* for VDA-accelerated decoding.
*
* When decoding with VDA is finished, the caller must destroy the decoder
* object and free the VDA context using av_free().
*
* @return the newly allocated context or NULL on failure
*/
AVVDAContext *av_vda_alloc_context(void);
/**
* This is a convenience function that creates and sets up the VDA context using
* an internal implementation.
*
* @param avctx the corresponding codec context
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_vda_default_init(AVCodecContext *avctx);
/**
* This function must be called to free the VDA context initialized with
* av_vda_default_init().
*
* @param avctx the corresponding codec context
*/
void av_vda_default_free(AVCodecContext *avctx);
/**
* @}
*/
#endif /* AVCODEC_VDA_H */
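A minimal get_format() sketch for the convenience path above (av_vda_default_init() / av_vda_default_free()); purely illustrative, assuming the build targets the old VDA framework and error handling is reduced to the bare minimum.

#include <libavcodec/avcodec.h>
#include <libavcodec/vda.h>

static enum AVPixelFormat get_format_vda(AVCodecContext *avctx,
                                         const enum AVPixelFormat *fmts)
{
    for (int i = 0; fmts[i] != AV_PIX_FMT_NONE; i++) {
        if (fmts[i] == AV_PIX_FMT_VDA && av_vda_default_init(avctx) >= 0)
            return AV_PIX_FMT_VDA;   /* hwaccel_context now holds an AVVDAContext */
    }
    return AV_PIX_FMT_NONE;          /* no usable hardware format offered */
}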

View File

@ -0,0 +1,176 @@
/*
* The Video Decode and Presentation API for UNIX (VDPAU) is used for
* hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
*
* Copyright (C) 2008 NVIDIA
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VDPAU_H
#define AVCODEC_VDPAU_H
/**
* @file
* @ingroup lavc_codec_hwaccel_vdpau
* Public libavcodec VDPAU header.
*/
/**
* @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
* @ingroup lavc_codec_hwaccel
*
* VDPAU hardware acceleration has two modules
* - VDPAU decoding
* - VDPAU presentation
*
* The VDPAU decoding module parses all headers using FFmpeg
* parsing mechanisms and uses VDPAU for the actual decoding.
*
* As per the current implementation, the actual decoding
* and rendering (API calls) are done as part of the VDPAU
* presentation (vo_vdpau.c) module.
*
* @{
*/
#include <vdpau/vdpau.h>
#include "libavutil/avconfig.h"
#include "libavutil/attributes.h"
#include "avcodec.h"
#include "version.h"
struct AVCodecContext;
struct AVFrame;
typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *,
const VdpPictureInfo *, uint32_t,
const VdpBitstreamBuffer *);
/**
* This structure is used to share data between the libavcodec library and
* the client video application.
* The user shall allocate the structure via the av_vdpau_alloc_context()
* function and make it available as
* AVCodecContext.hwaccel_context. Members can be set by the user once
* during initialization or through each AVCodecContext.get_buffer()
* function call. In any case, they must be valid prior to calling
* decoding functions.
*
* The size of this structure is not a part of the public ABI and must not
* be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an
* AVVDPAUContext.
*/
typedef struct AVVDPAUContext {
/**
* VDPAU decoder handle
*
* Set by user.
*/
VdpDecoder decoder;
/**
* VDPAU decoder render callback
*
* Set by the user.
*/
VdpDecoderRender *render;
AVVDPAU_Render2 render2;
} AVVDPAUContext;
/**
* @brief allocation function for AVVDPAUContext
*
* Allows extending the struct without breaking API/ABI
*/
AVVDPAUContext *av_alloc_vdpaucontext(void);
AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *);
void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2);
/**
* Associate a VDPAU device with a codec context for hardware acceleration.
* This function is meant to be called from the get_format() codec callback,
* or earlier. It can also be called after avcodec_flush_buffers() to change
* the underlying VDPAU device mid-stream (e.g. to recover from non-transparent
* display preemption).
*
* @note get_format() must return AV_PIX_FMT_VDPAU if this function completes
* successfully.
*
* @param avctx decoding context whose get_format() callback is invoked
* @param device VDPAU device handle to use for hardware acceleration
* @param get_proc_address VDPAU device driver
* @param flags zero of more OR'd AV_HWACCEL_FLAG_* flags
*
* @return 0 on success, an AVERROR code on failure.
*/
int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,
VdpGetProcAddress *get_proc_address, unsigned flags);
/**
* Gets the parameters to create an adequate VDPAU video surface for the codec
* context using VDPAU hardware decoding acceleration.
*
* @note Behavior is undefined if the context was not successfully bound to a
* VDPAU device using av_vdpau_bind_context().
*
* @param avctx the codec context being used for decoding the stream
* @param type storage space for the VDPAU video surface chroma type
* (or NULL to ignore)
* @param width storage space for the VDPAU video surface pixel width
* (or NULL to ignore)
* @param height storage space for the VDPAU video surface pixel height
* (or NULL to ignore)
*
* @return 0 on success, a negative AVERROR code on failure.
*/
int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type,
uint32_t *width, uint32_t *height);
/**
* Allocate an AVVDPAUContext.
*
* @return Newly-allocated AVVDPAUContext or NULL on failure.
*/
AVVDPAUContext *av_vdpau_alloc_context(void);
#if FF_API_VDPAU_PROFILE
/**
* Get a decoder profile that should be used for initializing a VDPAU decoder.
* Should be called from the AVCodecContext.get_format() callback.
*
* @deprecated Use av_vdpau_bind_context() instead.
*
* @param avctx the codec context being used for decoding the stream
* @param profile a pointer into which the result will be written on success.
* The contents of profile are undefined if this function returns
* an error.
*
* @return 0 on success (non-negative), a negative AVERROR on failure.
*/
attribute_deprecated
int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);
#endif
/* @}*/
#endif /* AVCODEC_VDPAU_H */
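A minimal sketch of the binding step described above, assuming the caller already obtained a VdpDevice and its VdpGetProcAddress entry point (for example from vdp_device_create_x11()); it would typically be invoked from the get_format() callback.

#include <libavcodec/avcodec.h>
#include <libavcodec/vdpau.h>

static enum AVPixelFormat pick_vdpau(AVCodecContext *avctx, VdpDevice device,
                                     VdpGetProcAddress *get_proc_address)
{
    if (av_vdpau_bind_context(avctx, device, get_proc_address, 0) < 0)
        return AV_PIX_FMT_NONE;  /* binding failed, no hardware decoding */
    return AV_PIX_FMT_VDPAU;     /* get_format() must report this on success */
}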

View File

@ -0,0 +1,140 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VERSION_H
#define AVCODEC_VERSION_H
/**
* @file
* @ingroup libavc
* Libavcodec version macros.
*/
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 58
#define LIBAVCODEC_VERSION_MINOR 54
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
LIBAVCODEC_VERSION_MICRO)
#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
LIBAVCODEC_VERSION_MICRO)
#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*/
#ifndef FF_API_LOWRES
#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_DEBUG_MV
#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 58)
#endif
#ifndef FF_API_AVCTX_TIMEBASE
#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_CODED_FRAME
#define FF_API_CODED_FRAME (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_SIDEDATA_ONLY_PKT
#define FF_API_SIDEDATA_ONLY_PKT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_VDPAU_PROFILE
#define FF_API_VDPAU_PROFILE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_CONVERGENCE_DURATION
#define FF_API_CONVERGENCE_DURATION (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_AVPICTURE
#define FF_API_AVPICTURE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_AVPACKET_OLD_API
#define FF_API_AVPACKET_OLD_API (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_RTP_CALLBACK
#define FF_API_RTP_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_VBV_DELAY
#define FF_API_VBV_DELAY (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_CODER_TYPE
#define FF_API_CODER_TYPE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_STAT_BITS
#define FF_API_STAT_BITS (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_PRIVATE_OPT
#define FF_API_PRIVATE_OPT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_ASS_TIMING
#define FF_API_ASS_TIMING (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OLD_BSF
#define FF_API_OLD_BSF (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_COPY_CONTEXT
#define FF_API_COPY_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_GET_CONTEXT_DEFAULTS
#define FF_API_GET_CONTEXT_DEFAULTS (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_NVENC_OLD_NAME
#define FF_API_NVENC_OLD_NAME (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_STRUCT_VAAPI_CONTEXT
#define FF_API_STRUCT_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_MERGE_SD_API
#define FF_API_MERGE_SD_API (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_TAG_STRING
#define FF_API_TAG_STRING (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_GETCHROMA
#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_CODEC_GET_SET
#define FF_API_CODEC_GET_SET (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_USER_VISIBLE_AVHWACCEL
#define FF_API_USER_VISIBLE_AVHWACCEL (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LOCKMGR
#define FF_API_LOCKMGR (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_NEXT
#define FF_API_NEXT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_UNSANITIZED_BITRATES
#define FF_API_UNSANITIZED_BITRATES (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#endif /* AVCODEC_VERSION_H */
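A small sketch of how client code typically consumes these macros: comparing the compile-time version against the run-time library and guarding soon-to-be-removed API with an FF_API_* define. The printf formatting is illustrative only.

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavcodec/version.h>

static void report_lavc_versions(void)
{
    unsigned rt = avcodec_version();  /* version of the library loaded at run time */

    printf("compiled against Lavc %d.%d.%d, running %u.%u.%u\n",
           LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, LIBAVCODEC_VERSION_MICRO,
           AV_VERSION_MAJOR(rt), AV_VERSION_MINOR(rt), AV_VERSION_MICRO(rt));

#if FF_API_CODED_FRAME
    /* code that still touches AVCodecContext.coded_frame would be guarded here */
#endif
}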

View File

@ -0,0 +1,127 @@
/*
* Videotoolbox hardware acceleration
*
* copyright (c) 2012 Sebastien Zwickert
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VIDEOTOOLBOX_H
#define AVCODEC_VIDEOTOOLBOX_H
/**
* @file
* @ingroup lavc_codec_hwaccel_videotoolbox
* Public libavcodec Videotoolbox header.
*/
#include <stdint.h>
#define Picture QuickdrawPicture
#include <VideoToolbox/VideoToolbox.h>
#undef Picture
#include "libavcodec/avcodec.h"
/**
* This struct holds all the information that needs to be passed
* between the caller and libavcodec for initializing Videotoolbox decoding.
* Its size is not a part of the public ABI, it must be allocated with
* av_videotoolbox_alloc_context() and freed with av_free().
*/
typedef struct AVVideotoolboxContext {
/**
* Videotoolbox decompression session object.
* Created and freed by the caller.
*/
VTDecompressionSessionRef session;
/**
* The output callback that must be passed to the session.
* Set by av_videotoolbox_default_init()
*/
VTDecompressionOutputCallback output_callback;
/**
* CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
* set by the caller. If this is set to 0, then no specific format is
* requested from the decoder, and its native format is output.
*/
OSType cv_pix_fmt_type;
/**
* CoreMedia Format Description that Videotoolbox will use to create the decompression session.
* Set by the caller.
*/
CMVideoFormatDescriptionRef cm_fmt_desc;
/**
* CoreMedia codec type that Videotoolbox will use to create the decompression session.
* Set by the caller.
*/
int cm_codec_type;
} AVVideotoolboxContext;
/**
* Allocate and initialize a Videotoolbox context.
*
* This function should be called from the get_format() callback when the caller
* selects the AV_PIX_FMT_VIDEOTOOLBOX format. The caller must then create
* the decoder object (using the output callback provided by libavcodec) that
* will be used for Videotoolbox-accelerated decoding.
*
* When decoding with Videotoolbox is finished, the caller must destroy the decoder
* object and free the Videotoolbox context using av_free().
*
* @return the newly allocated context or NULL on failure
*/
AVVideotoolboxContext *av_videotoolbox_alloc_context(void);
/**
* This is a convenience function that creates and sets up the Videotoolbox context using
* an internal implementation.
*
* @param avctx the corresponding codec context
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_videotoolbox_default_init(AVCodecContext *avctx);
/**
* This is a convenience function that sets up the Videotoolbox context using
* the context provided by the caller.
*
* @param avctx the corresponding codec context
* @param vtctx the Videotoolbox context to use
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx);
/**
* This function must be called to free the Videotoolbox context initialized with
* av_videotoolbox_default_init().
*
* @param avctx the corresponding codec context
*/
void av_videotoolbox_default_free(AVCodecContext *avctx);
/**
* @}
*/
#endif /* AVCODEC_VIDEOTOOLBOX_H */
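A minimal sketch combining av_videotoolbox_alloc_context() with av_videotoolbox_default_init2() as described above; requesting NV12 output via kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange is an assumption for the example, not a requirement of the API.

#include <libavcodec/avcodec.h>
#include <libavcodec/videotoolbox.h>

static int init_videotoolbox(AVCodecContext *avctx)
{
    AVVideotoolboxContext *vtctx = av_videotoolbox_alloc_context();
    if (!vtctx)
        return AVERROR(ENOMEM);

    /* 0 would let the decoder output its native format; NV12 is just an example */
    vtctx->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;

    /* creates the decompression session and attaches vtctx to avctx */
    return av_videotoolbox_default_init2(avctx, vtctx);
}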

View File

@ -0,0 +1,74 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* A public API for Vorbis parsing
*
* Determines the duration for each packet.
*/
#ifndef AVCODEC_VORBIS_PARSER_H
#define AVCODEC_VORBIS_PARSER_H
#include <stdint.h>
typedef struct AVVorbisParseContext AVVorbisParseContext;
/**
* Allocate and initialize the Vorbis parser using headers in the extradata.
*/
AVVorbisParseContext *av_vorbis_parse_init(const uint8_t *extradata,
int extradata_size);
/**
* Free the parser and everything associated with it.
*/
void av_vorbis_parse_free(AVVorbisParseContext **s);
#define VORBIS_FLAG_HEADER 0x00000001
#define VORBIS_FLAG_COMMENT 0x00000002
#define VORBIS_FLAG_SETUP 0x00000004
/**
* Get the duration for a Vorbis packet.
*
* If @p flags is @c NULL,
* special frames are considered invalid.
*
* @param s Vorbis parser context
* @param buf buffer containing a Vorbis frame
* @param buf_size size of the buffer
* @param flags flags for special frames
*/
int av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf,
int buf_size, int *flags);
/**
* Get the duration for a Vorbis packet.
*
* @param s Vorbis parser context
* @param buf buffer containing a Vorbis frame
* @param buf_size size of the buffer
*/
int av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf,
int buf_size);
void av_vorbis_parse_reset(AVVorbisParseContext *s);
#endif /* AVCODEC_VORBIS_PARSER_H */
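A minimal sketch of the parser API above: initialize from the codec extradata, query one packet's duration (in samples), then free the parser. In real code the parser would be created once and reused across packets rather than per call.

#include <stdint.h>
#include <libavcodec/vorbis_parser.h>

static int vorbis_packet_duration(const uint8_t *extradata, int extradata_size,
                                  const uint8_t *pkt, int pkt_size)
{
    AVVorbisParseContext *vp = av_vorbis_parse_init(extradata, extradata_size);
    int duration;

    if (!vp)
        return -1;
    duration = av_vorbis_parse_frame(vp, pkt, pkt_size);  /* samples in this packet */
    av_vorbis_parse_free(&vp);
    return duration;
}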

View File

@ -0,0 +1,170 @@
/*
* Copyright (C) 2003 Ivan Kalvachev
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_XVMC_H
#define AVCODEC_XVMC_H
/**
* @file
* @ingroup lavc_codec_hwaccel_xvmc
* Public libavcodec XvMC header.
*/
#include <X11/extensions/XvMC.h>
#include "libavutil/attributes.h"
#include "version.h"
#include "avcodec.h"
/**
* @defgroup lavc_codec_hwaccel_xvmc XvMC
* @ingroup lavc_codec_hwaccel
*
* @{
*/
#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct
the number is 1337 speak for the letters IDCT MCo (motion compensation) */
struct attribute_deprecated xvmc_pix_fmt {
/** The field contains the special constant value AV_XVMC_ID.
It is used as a test that the application correctly uses the API,
and that there is no corruption caused by pixel routines.
- application - set during initialization
- libavcodec - unchanged
*/
int xvmc_id;
/** Pointer to the block array allocated by XvMCCreateBlocks().
The array has to be freed by XvMCDestroyBlocks().
Each group of 64 values represents one data block of differential
pixel information (in MoCo mode) or coefficients for IDCT.
- application - set the pointer during initialization
- libavcodec - fills coefficients/pixel data into the array
*/
short* data_blocks;
/** Pointer to the macroblock description array allocated by
XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks().
- application - set the pointer during initialization
- libavcodec - fills description data into the array
*/
XvMCMacroBlock* mv_blocks;
/** Number of macroblock descriptions that can be stored in the mv_blocks
array.
- application - set during initialization
- libavcodec - unchanged
*/
int allocated_mv_blocks;
/** Number of blocks that can be stored at once in the data_blocks array.
- application - set during initialization
- libavcodec - unchanged
*/
int allocated_data_blocks;
/** Indicate that the hardware would interpret data_blocks as IDCT
coefficients and perform IDCT on them.
- application - set during initialization
- libavcodec - unchanged
*/
int idct;
/** In MoCo mode it indicates that intra macroblocks are assumed to be in
unsigned format; same as the XVMC_INTRA_UNSIGNED flag.
- application - set during initialization
- libavcodec - unchanged
*/
int unsigned_intra;
/** Pointer to the surface allocated by XvMCCreateSurface().
It has to be freed by XvMCDestroySurface() on application exit.
It identifies the frame and its state on the video hardware.
- application - set during initialization
- libavcodec - unchanged
*/
XvMCSurface* p_surface;
/** Set by the decoder before calling ff_draw_horiz_band(),
needed by the XvMCRenderSurface function. */
//@{
/** Pointer to the surface used as past reference
- application - unchanged
- libavcodec - set
*/
XvMCSurface* p_past_surface;
/** Pointer to the surface used as future reference
- application - unchanged
- libavcodec - set
*/
XvMCSurface* p_future_surface;
/** top/bottom field or frame
- application - unchanged
- libavcodec - set
*/
unsigned int picture_structure;
/** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence
- application - unchanged
- libavcodec - set
*/
unsigned int flags;
//@}
/** Number of macroblock descriptions in the mv_blocks array
that have already been passed to the hardware.
- application - zeroes it on get_buffer().
A successful ff_draw_horiz_band() may increment it
with filled_mv_blocks_num or zero both.
- libavcodec - unchanged
*/
int start_mv_blocks_num;
/** Number of new macroblock descriptions in the mv_blocks array (after
start_mv_blocks_num) that are filled by libavcodec and have to be
passed to the hardware.
- application - zeroes it on get_buffer() or after successful
ff_draw_horiz_band().
- libavcodec - increment with one of each stored MB
*/
int filled_mv_blocks_num;
/** Number of the next free data block; one data block consists of
64 short values in the data_blocks array.
All blocks before this one have already been claimed by placing their
position into the corresponding block description structure field,
that are part of the mv_blocks array.
- application - zeroes it on get_buffer().
A successful ff_draw_horiz_band() may zero it together
with start_mv_blocks_num.
- libavcodec - each decoded macroblock increases it by the number
of coded blocks it contains.
*/
int next_free_data_block_num;
};
/**
* @}
*/
#endif /* AVCODEC_XVMC_H */

View File

@ -0,0 +1,514 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_AVDEVICE_H
#define AVDEVICE_AVDEVICE_H
#include "version.h"
/**
* @file
* @ingroup lavd
* Main libavdevice API header
*/
/**
* @defgroup lavd libavdevice
* Special devices muxing/demuxing library.
*
* Libavdevice is a complementary library to @ref libavf "libavformat". It
* provides various "special" platform-specific muxers and demuxers, e.g. for
* grabbing devices, audio capture and playback etc. As a consequence, the
* (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
* I/O functions). The filename passed to avformat_open_input() often does not
* refer to an actually existing file, but has some special device-specific
* meaning - e.g. for xcbgrab it is the display name.
*
* To use libavdevice, simply call avdevice_register_all() to register all
* compiled muxers and demuxers. They all use standard libavformat API.
*
* @{
*/
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavformat/avformat.h"
/**
* Return the LIBAVDEVICE_VERSION_INT constant.
*/
unsigned avdevice_version(void);
/**
* Return the libavdevice build-time configuration.
*/
const char *avdevice_configuration(void);
/**
* Return the libavdevice license.
*/
const char *avdevice_license(void);
/**
* Initialize libavdevice and register all the input and output devices.
*/
void avdevice_register_all(void);
/**
* Audio input devices iterator.
*
* If d is NULL, returns the first registered input audio/video device,
* if d is non-NULL, returns the next registered input audio/video device after d
* or NULL if d is the last one.
*/
AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
/**
* Video input devices iterator.
*
* If d is NULL, returns the first registered input audio/video device,
* if d is non-NULL, returns the next registered input audio/video device after d
* or NULL if d is the last one.
*/
AVInputFormat *av_input_video_device_next(AVInputFormat *d);
/**
* Audio output devices iterator.
*
* If d is NULL, returns the first registered output audio/video device,
* if d is non-NULL, returns the next registered output audio/video device after d
* or NULL if d is the last one.
*/
AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
/**
* Video output devices iterator.
*
* If d is NULL, returns the first registered output audio/video device,
* if d is non-NULL, returns the next registered output audio/video device after d
* or NULL if d is the last one.
*/
AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
typedef struct AVDeviceRect {
int x; /**< x coordinate of top left corner */
int y; /**< y coordinate of top left corner */
int width; /**< width */
int height; /**< height */
} AVDeviceRect;
/**
* Message types used by avdevice_app_to_dev_control_message().
*/
enum AVAppToDevMessageType {
/**
* Dummy message.
*/
AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),
/**
* Window size change message.
*
* Message is sent to the device every time the application changes the size
* of the window device renders to.
* Message should also be sent right after window is created.
*
* data: AVDeviceRect: new window size.
*/
AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),
/**
* Repaint request message.
*
* Message is sent to the device when window has to be repainted.
*
* data: AVDeviceRect: area required to be repainted.
* NULL: whole area is required to be repainted.
*/
AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),
/**
* Request pause/play.
*
* Application requests pause/unpause playback.
* Mostly usable with devices that have internal buffer.
* By default devices are not paused.
*
* data: NULL
*/
AV_APP_TO_DEV_PAUSE = MKBETAG('P', 'A', 'U', ' '),
AV_APP_TO_DEV_PLAY = MKBETAG('P', 'L', 'A', 'Y'),
AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),
/**
* Volume control message.
*
* Set volume level. It may be device-dependent if volume
* is changed per stream or system wide. Per stream volume
* change is expected when possible.
*
* data: double: new volume with range of 0.0 - 1.0.
*/
AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),
/**
* Mute control messages.
*
* Change mute state. It may be device-dependent if mute status
* is changed per stream or system wide. Per stream mute status
* change is expected when possible.
*
* data: NULL.
*/
AV_APP_TO_DEV_MUTE = MKBETAG(' ', 'M', 'U', 'T'),
AV_APP_TO_DEV_UNMUTE = MKBETAG('U', 'M', 'U', 'T'),
AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),
/**
* Get volume/mute messages.
*
* Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
* AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
*
* data: NULL.
*/
AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),
AV_APP_TO_DEV_GET_MUTE = MKBETAG('G', 'M', 'U', 'T'),
};
/**
* Message types used by avdevice_dev_to_app_control_message().
*/
enum AVDevToAppMessageType {
/**
* Dummy message.
*/
AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),
/**
* Create window buffer message.
*
* Device requests to create a window buffer. Exact meaning is device-
* and application-dependent. Message is sent before rendering first
* frame and all one-shot initializations should be done here.
* Application is allowed to ignore preferred window buffer size.
*
* @note: Application is obligated to inform about window buffer size
* with AV_APP_TO_DEV_WINDOW_SIZE message.
*
* data: AVDeviceRect: preferred size of the window buffer.
* NULL: no preferred size of the window buffer.
*/
AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),
/**
* Prepare window buffer message.
*
* Device requests to prepare a window buffer for rendering.
* Exact meaning is device- and application-dependent.
* Message is sent before rendering of each frame.
*
* data: NULL.
*/
AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),
/**
* Display window buffer message.
*
* Device requests to display a window buffer.
* Message is sent when new frame is ready to be displayed.
* Usually buffers need to be swapped in handler of this message.
*
* data: NULL.
*/
AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),
/**
* Destroy window buffer message.
*
* Device requests to destroy a window buffer.
* Message is sent when device is about to be destroyed and window
* buffer is not required anymore.
*
* data: NULL.
*/
AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),
/**
* Buffer fullness status messages.
*
* Device signals buffer overflow/underflow.
*
* data: NULL.
*/
AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),
AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),
/**
* Buffer readable/writable.
*
* Device informs that buffer is readable/writable.
* When possible, the device informs how many bytes can be read/written.
*
* @warning The device may not send a notification when the number of bytes that can be read/written changes.
*
* data: int64_t: amount of bytes available to read/write.
* NULL: amount of bytes available to read/write is not known.
*/
AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),
AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),
/**
* Mute state change message.
*
* Device informs that mute state has changed.
*
* data: int: 0 for not muted state, non-zero for muted state.
*/
AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),
/**
* Volume level change message.
*
* Device informs that volume level has changed.
*
* data: double: new volume with range of 0.0 - 1.0.
*/
AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),
};
/**
* Send control message from application to device.
*
* @param s device context.
* @param type message type.
* @param data message data. Exact type depends on message type.
* @param data_size size of message data.
* @return >= 0 on success, negative on error.
* AVERROR(ENOSYS) when device doesn't implement handler of the message.
*/
int avdevice_app_to_dev_control_message(struct AVFormatContext *s,
enum AVAppToDevMessageType type,
void *data, size_t data_size);
/**
* Send control message from device to application.
*
* @param s device context.
* @param type message type.
* @param data message data. Can be NULL.
* @param data_size size of message data.
* @return >= 0 on success, negative on error.
* AVERROR(ENOSYS) when application doesn't implement handler of the message.
*/
int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
enum AVDevToAppMessageType type,
void *data, size_t data_size);
/**
* The following API allows the user to probe device capabilities (supported codecs,
* pixel formats, sample formats, resolutions, channel counts, etc.).
* It is built on top of the AVOption API.
* Queried capabilities make it possible to set up converters for video or audio
* parameters so that they fit the device.
*
* List of capabilities that can be queried:
* - Capabilities valid for both audio and video devices:
* - codec: supported audio/video codecs.
* type: AV_OPT_TYPE_INT (AVCodecID value)
* - Capabilities valid for audio devices:
* - sample_format: supported sample formats.
* type: AV_OPT_TYPE_INT (AVSampleFormat value)
* - sample_rate: supported sample rates.
* type: AV_OPT_TYPE_INT
* - channels: supported number of channels.
* type: AV_OPT_TYPE_INT
* - channel_layout: supported channel layouts.
* type: AV_OPT_TYPE_INT64
* - Capabilities valid for video devices:
* - pixel_format: supported pixel formats.
* type: AV_OPT_TYPE_INT (AVPixelFormat value)
* - window_size: supported window sizes (describes the size of the window presented to the user).
* type: AV_OPT_TYPE_IMAGE_SIZE
* - frame_size: supported frame sizes (describes size of provided video frames).
* type: AV_OPT_TYPE_IMAGE_SIZE
* - fps: supported fps values
* type: AV_OPT_TYPE_RATIONAL
*
* The value of a capability may be set by the user with av_opt_set() on the
* AVDeviceCapabilitiesQuery object. Subsequent queries will
* limit results to the values matching the already set capabilities.
* For example, setting a codec may impact the number of formats or fps values
* returned during the next query. Setting an invalid value may limit results to zero.
*
* Example of the usage basing on opengl output device:
*
* @code
* AVFormatContext *oc = NULL;
* AVDeviceCapabilitiesQuery *caps = NULL;
* AVOptionRanges *ranges;
* int ret;
*
* if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
* goto fail;
* if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
* goto fail;
*
* //query codecs
* if (av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE) < 0)
* goto fail;
* //pick codec here and set it
* av_opt_set_int(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
*
* //query format
* if (av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE) < 0)
* goto fail;
* //pick format here and set it
* av_opt_set_int(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
*
* //query and set more capabilities
*
* fail:
* //clean up code
* avdevice_capabilities_free(&caps, oc);
* avformat_free_context(oc);
* @endcode
*/
/**
* Structure describes device capabilities.
*
* It is used by devices in conjunction with av_device_capabilities AVOption table
* to implement capabilities probing API based on AVOption API. Should not be used directly.
*/
typedef struct AVDeviceCapabilitiesQuery {
const AVClass *av_class;
AVFormatContext *device_context;
enum AVCodecID codec;
enum AVSampleFormat sample_format;
enum AVPixelFormat pixel_format;
int sample_rate;
int channels;
int64_t channel_layout;
int window_width;
int window_height;
int frame_width;
int frame_height;
AVRational fps;
} AVDeviceCapabilitiesQuery;
/**
* AVOption table used by devices to implement device capabilities API. Should not be used by a user.
*/
extern const AVOption av_device_capabilities[];
/**
* Initialize capabilities probing API based on AVOption API.
*
* avdevice_capabilities_free() must be called when query capabilities API is
* not used anymore.
*
* @param[out] caps Device capabilities data. Pointer to a NULL pointer must be passed.
* @param s Context of the device.
* @param device_options An AVDictionary filled with device-private options.
* On return this parameter will be destroyed and replaced with a dict
* containing options that were not found. May be NULL.
* The same options must be passed later to avformat_write_header() for output
* devices or avformat_open_input() for input devices, or at any other place
* that affects device-private options.
*
* @return >= 0 on success, negative otherwise.
*/
int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
AVDictionary **device_options);
/**
* Free resources created by avdevice_capabilities_create()
*
* @param caps Device capabilities data to be freed.
* @param s Context of the device.
*/
void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);
/**
* Structure describes basic parameters of the device.
*/
typedef struct AVDeviceInfo {
char *device_name; /**< device name, format depends on device */
char *device_description; /**< human friendly name */
} AVDeviceInfo;
/**
* List of devices.
*/
typedef struct AVDeviceInfoList {
AVDeviceInfo **devices; /**< list of autodetected devices */
int nb_devices; /**< number of autodetected devices */
int default_device; /**< index of default device or -1 if no default */
} AVDeviceInfoList;
/**
* List devices.
*
* Returns available device names and their parameters.
*
* @note: Some devices may accept system-dependent device names that cannot be
* autodetected. The list returned by this function cannot be assumed to
* be always complete.
*
* @param s device context.
* @param[out] device_list list of autodetected devices.
* @return count of autodetected devices, negative on error.
*/
int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);
/**
* Convenient function to free result of avdevice_list_devices().
*
* @param devices device list to be freed.
*/
void avdevice_free_list_devices(AVDeviceInfoList **device_list);
/**
* List devices.
*
* Returns available device names and their parameters.
* These are convenient wrappers for avdevice_list_devices().
* Device context is allocated and deallocated internally.
*
* @param device device format. May be NULL if device name is set.
* @param device_name device name. May be NULL if device format is set.
* @param device_options An AVDictionary filled with device-private options. May be NULL.
* The same options must be passed later to avformat_write_header() for output
* devices or avformat_open_input() for input devices, or at any other place
* that affects device-private options.
* @param[out] device_list list of autodetected devices
* @return count of autodetected devices, negative on error.
* @note device argument takes precedence over device_name when both are set.
*/
int avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,
AVDictionary *device_options, AVDeviceInfoList **device_list);
int avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,
AVDictionary *device_options, AVDeviceInfoList **device_list);
/**
* @}
*/
#endif /* AVDEVICE_AVDEVICE_H */
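A minimal enumeration sketch using the iterators and listing API above; devices that do not implement listing return AVERROR(ENOSYS), which this sketch simply skips over.

#include <stdio.h>
#include <libavdevice/avdevice.h>

static void dump_input_video_sources(void)
{
    AVInputFormat *fmt = NULL;

    avdevice_register_all();

    while ((fmt = av_input_video_device_next(fmt))) {
        AVDeviceInfoList *list = NULL;
        int n = avdevice_list_input_sources(fmt, NULL, NULL, &list);

        printf("input video device '%s': %d source(s)\n", fmt->name, n > 0 ? n : 0);
        for (int i = 0; i < n; i++)
            printf("  %s (%s)\n", list->devices[i]->device_name,
                                  list->devices[i]->device_description);

        if (list)
            avdevice_free_list_devices(&list);
    }
}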

View File

@ -0,0 +1,50 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_VERSION_H
#define AVDEVICE_VERSION_H
/**
* @file
* @ingroup lavd
* Libavdevice version macros
*/
#include "libavutil/version.h"
#define LIBAVDEVICE_VERSION_MAJOR 58
#define LIBAVDEVICE_VERSION_MINOR 8
#define LIBAVDEVICE_VERSION_MICRO 100
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
LIBAVDEVICE_VERSION_MICRO)
#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
LIBAVDEVICE_VERSION_MICRO)
#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT
#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
#endif /* AVDEVICE_VERSION_H */

View File

@ -0,0 +1,91 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_ASRC_ABUFFER_H
#define AVFILTER_ASRC_ABUFFER_H
#include "avfilter.h"
/**
* @file
* memory buffer source for audio
*
* @deprecated use buffersrc.h instead.
*/
/**
* Queue an audio buffer to the audio buffer source.
*
* @param abuffersrc audio source buffer context
* @param data pointers to the samples planes
* @param linesize linesizes of each audio buffer plane
* @param nb_samples number of samples per channel
* @param sample_rate sample rate of the audio data
* @param sample_fmt sample format of the audio data
* @param ch_layout channel layout of the audio data
* @param planar flag to indicate if audio data is planar or packed
* @param pts presentation timestamp of the audio buffer
* @param flags unused
*
* @deprecated use av_buffersrc_add_ref() instead.
*/
attribute_deprecated
int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc,
uint8_t *data[8], int linesize[8],
int nb_samples, int sample_rate,
int sample_fmt, int64_t ch_layout, int planar,
int64_t pts, int av_unused flags);
/**
* Queue an audio buffer to the audio buffer source.
*
* This is similar to av_asrc_buffer_add_samples(), but the samples
* are stored in a buffer with known size.
*
* @param abuffersrc audio source buffer context
* @param buf pointer to the samples data, packed is assumed
* @param size the size in bytes of the buffer, it must contain an
* integer number of samples
* @param sample_fmt sample format of the audio data
* @param ch_layout channel layout of the audio data
* @param pts presentation timestamp of the audio buffer
* @param flags unused
*
* @deprecated use av_buffersrc_add_ref() instead.
*/
attribute_deprecated
int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc,
uint8_t *buf, int buf_size,
int sample_rate,
int sample_fmt, int64_t ch_layout, int planar,
int64_t pts, int av_unused flags);
/**
* Queue an audio buffer to the audio buffer source.
*
* @param abuffersrc audio source buffer context
* @param samplesref buffer ref to queue
* @param flags unused
*
* @deprecated use av_buffersrc_add_ref() instead.
*/
attribute_deprecated
int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc,
AVFilterBufferRef *samplesref,
int av_unused flags);
#endif /* AVFILTER_ASRC_ABUFFER_H */

View File

@ -0,0 +1,110 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_AVCODEC_H
#define AVFILTER_AVCODEC_H
/**
* @file
* libavcodec/libavfilter gluing utilities
*
* This should be included in an application ONLY if the installed
* libavfilter has been compiled with libavcodec support, otherwise
* symbols defined below will not be available.
*/
#include "avfilter.h"
#if FF_API_AVFILTERBUFFER
/**
* Create and return a picref reference from the data and properties
* contained in frame.
*
* @param perms permissions to assign to the new buffer reference
* @deprecated avfilter APIs work natively with AVFrame instead.
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms);
/**
* Create and return a picref reference from the data and properties
* contained in frame.
*
* @param perms permissions to assign to the new buffer reference
* @deprecated avfilter APIs work natively with AVFrame instead.
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
int perms);
/**
* Create and return a buffer reference from the data and properties
* contained in frame.
*
* @param perms permissions to assign to the new buffer reference
* @deprecated avfilter APIs work natively with AVFrame instead.
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
const AVFrame *frame,
int perms);
#endif
#if FF_API_FILL_FRAME
/**
* Fill an AVFrame with the information stored in samplesref.
*
* @param frame an already allocated AVFrame
* @param samplesref an audio buffer reference
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
attribute_deprecated
int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *samplesref);
/**
* Fill an AVFrame with the information stored in picref.
*
* @param frame an already allocated AVFrame
* @param picref a video buffer reference
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
attribute_deprecated
int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *picref);
/**
* Fill an AVFrame with information stored in ref.
*
* @param frame an already allocated AVFrame
* @param ref a video or audio buffer reference
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
attribute_deprecated
int avfilter_fill_frame_from_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *ref);
#endif
#endif /* AVFILTER_AVCODEC_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,28 @@
/*
* Filter graphs
* copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_AVFILTERGRAPH_H
#define AVFILTER_AVFILTERGRAPH_H
#include "avfilter.h"
#include "libavutil/log.h"
#endif /* AVFILTER_AVFILTERGRAPH_H */

View File

@ -0,0 +1,165 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_BUFFERSINK_H
#define AVFILTER_BUFFERSINK_H
/**
* @file
* @ingroup lavfi_buffersink
* memory buffer sink API for audio and video
*/
#include "avfilter.h"
/**
* @defgroup lavfi_buffersink Buffer sink API
* @ingroup lavfi
* @{
*/
/**
* Get a frame with filtered data from sink and put it in frame.
*
* @param ctx pointer to a buffersink or abuffersink filter context.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
* @param flags a combination of AV_BUFFERSINK_FLAG_* flags
*
* @return >= 0 for success, a negative AVERROR code for failure.
*/
int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
/**
* Tell av_buffersink_get_buffer_ref() to read the video/samples buffer
* reference, but not remove it from the buffer. This is useful if you
* only need to read a video/samples buffer, without fetching it.
*/
#define AV_BUFFERSINK_FLAG_PEEK 1
/**
* Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
* If a frame is already buffered, it is read (and removed from the buffer),
* but if no frame is present, return AVERROR(EAGAIN).
*/
#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
/**
* Struct to use for initializing a buffersink context.
*/
typedef struct AVBufferSinkParams {
const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE
} AVBufferSinkParams;
/**
* Create an AVBufferSinkParams structure.
*
* Must be freed with av_free().
*/
AVBufferSinkParams *av_buffersink_params_alloc(void);
/**
* Struct to use for initializing an abuffersink context.
*/
typedef struct AVABufferSinkParams {
const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
const int *channel_counts; ///< list of allowed channel counts, terminated by -1
int all_channel_counts; ///< if not 0, accept any channel count or layout
int *sample_rates; ///< list of allowed sample rates, terminated by -1
} AVABufferSinkParams;
/**
* Create an AVABufferSinkParams structure.
*
* Must be freed with av_free().
*/
AVABufferSinkParams *av_abuffersink_params_alloc(void);
/**
* Set the frame size for an audio buffer sink.
*
* All calls to av_buffersink_get_buffer_ref will return a buffer with
* exactly the specified number of samples, or AVERROR(EAGAIN) if there are
* not enough samples. The last buffer at EOF will be padded with 0.
*/
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
/**
* @defgroup lavfi_buffersink_accessors Buffer sink accessors
* Get the properties of the stream
* @{
*/
enum AVMediaType av_buffersink_get_type (const AVFilterContext *ctx);
AVRational av_buffersink_get_time_base (const AVFilterContext *ctx);
int av_buffersink_get_format (const AVFilterContext *ctx);
AVRational av_buffersink_get_frame_rate (const AVFilterContext *ctx);
int av_buffersink_get_w (const AVFilterContext *ctx);
int av_buffersink_get_h (const AVFilterContext *ctx);
AVRational av_buffersink_get_sample_aspect_ratio (const AVFilterContext *ctx);
int av_buffersink_get_channels (const AVFilterContext *ctx);
uint64_t av_buffersink_get_channel_layout (const AVFilterContext *ctx);
int av_buffersink_get_sample_rate (const AVFilterContext *ctx);
AVBufferRef * av_buffersink_get_hw_frames_ctx (const AVFilterContext *ctx);
/** @} */
/**
* Get a frame with filtered data from sink and put it in frame.
*
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
*
* @return
* - >= 0 if a frame was successfully returned.
* - AVERROR(EAGAIN) if no frames are available at this point; more
* input frames must be added to the filtergraph to get more output.
* - AVERROR_EOF if there will be no more output frames on this sink.
* - A different negative AVERROR code in other failure cases.
*/
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
/**
* Same as av_buffersink_get_frame(), but with the ability to specify the number
* of samples read. This function is less efficient than
* av_buffersink_get_frame(), because it copies the data around.
*
* @param ctx pointer to a context of the abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
* frame will contain exactly nb_samples audio samples, except at
* the end of stream, when it can contain fewer than nb_samples.
*
* @return The return codes have the same meaning as for
* av_buffersink_get_frame().
*
* @warning do not mix this function with av_buffersink_get_frame(). Use only one or
* the other with a single sink, not both.
*/
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);
/**
* @}
*/
#endif /* AVFILTER_BUFFERSINK_H */
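
A minimal usage sketch for the sink side of the API above, not taken from the header: it assumes a filtergraph has already been configured, that sink_ctx points to its buffersink (or abuffersink) instance, and that consume_frame stands in for whatever the application does with each filtered frame.

#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>

/* Drain every frame that is currently available on the sink. */
static int drain_sink(AVFilterContext *sink_ctx,
                      void (*consume_frame)(const AVFrame *))
{
    AVFrame *out = av_frame_alloc();
    int ret;

    if (!out)
        return AVERROR(ENOMEM);
    while ((ret = av_buffersink_get_frame(sink_ctx, out)) >= 0) {
        consume_frame(out);
        av_frame_unref(out);   /* release the reference before reusing the frame */
    }
    av_frame_free(&out);
    /* EAGAIN (more input needed) and EOF are normal ways for the loop to end. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}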

View File

@ -0,0 +1,209 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_BUFFERSRC_H
#define AVFILTER_BUFFERSRC_H
/**
* @file
* @ingroup lavfi_buffersrc
* Memory buffer source API.
*/
#include "avfilter.h"
/**
* @defgroup lavfi_buffersrc Buffer source API
* @ingroup lavfi
* @{
*/
enum {
/**
* Do not check for format changes.
*/
AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,
/**
* Immediately push the frame to the output.
*/
AV_BUFFERSRC_FLAG_PUSH = 4,
/**
* Keep a reference to the frame.
* If the frame is reference-counted, create a new reference; otherwise
* copy the frame data.
*/
AV_BUFFERSRC_FLAG_KEEP_REF = 8,
};
/**
* Get the number of failed requests.
*
* A failed request is when the request_frame method is called while no
* frame is present in the buffer.
* The number is reset when a frame is added.
*/
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
/**
* This structure contains the parameters describing the frames that will be
* passed to this filter.
*
* It should be allocated with av_buffersrc_parameters_alloc() and freed with
* av_free(). All the allocated fields in it remain owned by the caller.
*/
typedef struct AVBufferSrcParameters {
/**
* video: the pixel format, value corresponds to enum AVPixelFormat
* audio: the sample format, value corresponds to enum AVSampleFormat
*/
int format;
/**
* The timebase to be used for the timestamps on the input frames.
*/
AVRational time_base;
/**
* Video only, the display dimensions of the input frames.
*/
int width, height;
/**
* Video only, the sample (pixel) aspect ratio.
*/
AVRational sample_aspect_ratio;
/**
* Video only, the frame rate of the input video. This field must only be
* set to a non-zero value if the input stream has a known constant framerate
* and should be left at its initial value if the framerate is variable or
* unknown.
*/
AVRational frame_rate;
/**
* Video with a hwaccel pixel format only. This should be a reference to an
* AVHWFramesContext instance describing the input frames.
*/
AVBufferRef *hw_frames_ctx;
/**
* Audio only, the audio sampling rate in samples per second.
*/
int sample_rate;
/**
* Audio only, the audio channel layout
*/
uint64_t channel_layout;
} AVBufferSrcParameters;
/**
* Allocate a new AVBufferSrcParameters instance. It should be freed by the
* caller with av_free().
*/
AVBufferSrcParameters *av_buffersrc_parameters_alloc(void);
/**
* Initialize the buffersrc or abuffersrc filter with the provided parameters.
* This function may be called multiple times; later calls override the
* previous ones. Some of the parameters may also be set through AVOptions, in
* which case whichever method is used last takes precedence.
*
* @param ctx an instance of the buffersrc or abuffersrc filter
* @param param the stream parameters. The frames later passed to this filter
* must conform to those parameters. All the allocated fields in
* param remain owned by the caller, libavfilter will make internal
* copies or references when necessary.
* @return 0 on success, a negative AVERROR code on failure.
*/
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param);
/**
* Add a frame to the buffer source.
*
* @param ctx an instance of the buffersrc filter
* @param frame frame to be added. If the frame is reference counted, this
* function will make a new reference to it. Otherwise the frame data will be
* copied.
*
* @return 0 on success, a negative AVERROR on error
*
* This function is equivalent to av_buffersrc_add_frame_flags() with the
* AV_BUFFERSRC_FLAG_KEEP_REF flag.
*/
av_warn_unused_result
int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);
/**
* Add a frame to the buffer source.
*
* @param ctx an instance of the buffersrc filter
* @param frame frame to be added. If the frame is reference counted, this
* function will take ownership of the reference(s) and reset the frame.
* Otherwise the frame data will be copied. If this function returns an error,
* the input frame is not touched.
*
* @return 0 on success, a negative AVERROR on error.
*
* @note the difference between this function and av_buffersrc_write_frame() is
* that av_buffersrc_write_frame() creates a new reference to the input frame,
* while this function takes ownership of the reference passed to it.
*
* This function is equivalent to av_buffersrc_add_frame_flags() without the
* AV_BUFFERSRC_FLAG_KEEP_REF flag.
*/
av_warn_unused_result
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
/**
* Add a frame to the buffer source.
*
* By default, if the frame is reference-counted, this function will take
* ownership of the reference(s) and reset the frame. This can be controlled
* using the flags.
*
* If this function returns an error, the input frame is not touched.
*
* @param buffer_src pointer to a buffer source context
* @param frame a frame, or NULL to mark EOF
* @param flags a combination of AV_BUFFERSRC_FLAG_*
* @return >= 0 in case of success, a negative AVERROR code
* in case of failure
*/
av_warn_unused_result
int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,
AVFrame *frame, int flags);
/**
* Close the buffer source after EOF.
*
* This is similar to passing NULL to av_buffersrc_add_frame_flags()
* except it takes the timestamp of the EOF, i.e. the timestamp of the end
* of the last frame.
*/
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags);
/**
* @}
*/
#endif /* AVFILTER_BUFFERSRC_H */
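
A small companion sketch for the source side, again not part of the header: src_ctx is assumed to be an already configured buffer/abuffer filter instance inside a filtergraph, and the AVBufferSrcParameters setup is omitted. Passing NULL as the frame marks end of stream.

#include <libavfilter/buffersrc.h>

/* Push one decoded frame into the graph, or signal EOF with frame == NULL.
 * AV_BUFFERSRC_FLAG_KEEP_REF keeps the caller's reference intact so the
 * frame can still be reused afterwards. */
static int feed_source(AVFilterContext *src_ctx, AVFrame *frame)
{
    return av_buffersrc_add_frame_flags(src_ctx, frame,
                                        frame ? AV_BUFFERSRC_FLAG_KEEP_REF : 0);
}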

View File

@ -0,0 +1,66 @@
/*
* Version macros.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_VERSION_H
#define AVFILTER_VERSION_H
/**
* @file
* @ingroup lavfi
* Libavfilter version macros
*/
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 7
#define LIBAVFILTER_VERSION_MINOR 57
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
LIBAVFILTER_VERSION_MICRO)
#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
LIBAVFILTER_VERSION_MICRO)
#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT
#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
#ifndef FF_API_OLD_FILTER_OPTS_ERROR
#define FF_API_OLD_FILTER_OPTS_ERROR (LIBAVFILTER_VERSION_MAJOR < 8)
#endif
#ifndef FF_API_LAVR_OPTS
#define FF_API_LAVR_OPTS (LIBAVFILTER_VERSION_MAJOR < 8)
#endif
#ifndef FF_API_FILTER_GET_SET
#define FF_API_FILTER_GET_SET (LIBAVFILTER_VERSION_MAJOR < 8)
#endif
#ifndef FF_API_NEXT
#define FF_API_NEXT (LIBAVFILTER_VERSION_MAJOR < 8)
#endif
#endif /* AVFILTER_VERSION_H */
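
The version macros above are typically consumed at compile time; a hedged sketch of the usual pattern follows (the cutoff value here is only an example, not a recommendation).

#include <libavfilter/version.h>

#if LIBAVFILTER_VERSION_INT >= AV_VERSION_INT(7, 0, 0)
/* build the code path that relies on the newer AVFrame-based filtering API */
#else
/* fall back to the legacy API for older libavfilter releases */
#endif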

File diff suppressed because it is too large

View File

@ -0,0 +1,861 @@
/*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_AVIO_H
#define AVFORMAT_AVIO_H
/**
* @file
* @ingroup lavf_io
* Buffered I/O operations
*/
#include <stdint.h>
#include "libavutil/common.h"
#include "libavutil/dict.h"
#include "libavutil/log.h"
#include "libavformat/version.h"
/**
* Seeking works like for a local file.
*/
#define AVIO_SEEKABLE_NORMAL (1 << 0)
/**
* Seeking by timestamp with avio_seek_time() is possible.
*/
#define AVIO_SEEKABLE_TIME (1 << 1)
/**
* Callback for checking whether to abort blocking functions.
* AVERROR_EXIT is returned in this case by the interrupted
* function. During blocking operations, callback is called with
* opaque as parameter. If the callback returns 1, the
* blocking operation will be aborted.
*
* No members can be added to this struct without a major bump, if
* new elements have been added after this struct in AVFormatContext
* or AVIOContext.
*/
typedef struct AVIOInterruptCB {
int (*callback)(void*);
void *opaque;
} AVIOInterruptCB;
/**
* Directory entry types.
*/
enum AVIODirEntryType {
AVIO_ENTRY_UNKNOWN,
AVIO_ENTRY_BLOCK_DEVICE,
AVIO_ENTRY_CHARACTER_DEVICE,
AVIO_ENTRY_DIRECTORY,
AVIO_ENTRY_NAMED_PIPE,
AVIO_ENTRY_SYMBOLIC_LINK,
AVIO_ENTRY_SOCKET,
AVIO_ENTRY_FILE,
AVIO_ENTRY_SERVER,
AVIO_ENTRY_SHARE,
AVIO_ENTRY_WORKGROUP,
};
/**
* Describes single entry of the directory.
*
* Only the name and type fields are guaranteed to be set.
* The remaining fields are protocol- and/or platform-dependent and might be unknown.
*/
typedef struct AVIODirEntry {
char *name; /**< Filename */
int type; /**< Type of the entry */
int utf8; /**< Set to 1 when name is encoded with UTF-8, 0 otherwise.
The name may still be UTF-8 encoded even when this is set to 0. */
int64_t size; /**< File size in bytes, -1 if unknown. */
int64_t modification_timestamp; /**< Time of last modification in microseconds since unix
epoch, -1 if unknown. */
int64_t access_timestamp; /**< Time of last access in microseconds since unix epoch,
-1 if unknown. */
int64_t status_change_timestamp; /**< Time of last status change in microseconds since unix
epoch, -1 if unknown. */
int64_t user_id; /**< User ID of owner, -1 if unknown. */
int64_t group_id; /**< Group ID of owner, -1 if unknown. */
int64_t filemode; /**< Unix file mode, -1 if unknown. */
} AVIODirEntry;
typedef struct AVIODirContext {
struct URLContext *url_context;
} AVIODirContext;
/**
* Different data types that can be returned via the AVIO
* write_data_type callback.
*/
enum AVIODataMarkerType {
/**
* Header data; this needs to be present for the stream to be decodeable.
*/
AVIO_DATA_MARKER_HEADER,
/**
* A point in the output bytestream where a decoder can start decoding
* (i.e. a keyframe). A demuxer/decoder given the data flagged with
* AVIO_DATA_MARKER_HEADER, followed by any AVIO_DATA_MARKER_SYNC_POINT,
* should give decodeable results.
*/
AVIO_DATA_MARKER_SYNC_POINT,
/**
* A point in the output bytestream where a demuxer can start parsing
* (for non self synchronizing bytestream formats). That is, any
* non-keyframe packet start point.
*/
AVIO_DATA_MARKER_BOUNDARY_POINT,
/**
* This is any unlabelled data. It can mean that the muxer is not marking
* any positions at all, that this is an actual boundary/sync point
* the muxer chooses not to mark, or that it is a later part of a packet/fragment
* split into multiple write callbacks due to limited IO buffer size.
*/
AVIO_DATA_MARKER_UNKNOWN,
/**
* Trailer data, which doesn't contain actual content, but only for
* finalizing the output file.
*/
AVIO_DATA_MARKER_TRAILER,
/**
* A point in the output bytestream where the underlying AVIOContext might
* flush the buffer depending on latency or buffering requirements. Typically
* means the end of a packet.
*/
AVIO_DATA_MARKER_FLUSH_POINT,
};
/**
* Bytestream IO Context.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* sizeof(AVIOContext) must not be used outside libav*.
*
* @note None of the function pointers in AVIOContext should be called
* directly, they should only be set by the client application
* when implementing custom I/O. Normally these are set to the
* function pointers specified in avio_alloc_context()
*/
typedef struct AVIOContext {
/**
* A class for private options.
*
* If this AVIOContext is created by avio_open2(), av_class is set and
* passes the options down to protocols.
*
* If this AVIOContext is manually allocated, then av_class may be set by
* the caller.
*
* warning -- this field can be NULL, be sure to not pass this AVIOContext
* to any av_opt_* functions in that case.
*/
const AVClass *av_class;
/*
* The following shows the relationship between buffer, buf_ptr,
* buf_ptr_max, buf_end, buf_size, and pos, when reading and when writing
* (since AVIOContext is used for both):
*
**********************************************************************************
*                                   READING
**********************************************************************************
*
*                            |              buffer_size              |
*                            |---------------------------------------|
*                            |                                       |
*
*                         buffer          buf_ptr       buf_end
*                            +---------------+-----------------------+
*                            |/ / / / / / / /|/ / / / / / /|         |
*  read buffer:              |/ / consumed / | to be read /|         |
*                            |/ / / / / / / /|/ / / / / / /|         |
*                            +---------------+-----------------------+
*
*                                                         pos
*              +-------------------------------------------+-----------------+
*  input file: |                                           |                 |
*              +-------------------------------------------+-----------------+
*
*
**********************************************************************************
*                                   WRITING
**********************************************************************************
*
*                             |          buffer_size                 |
*                             |--------------------------------------|
*                             |                                      |
*
*                                                buf_ptr_max
*                          buffer                 (buf_ptr)       buf_end
*                             +-----------------------+--------------+
*                             |/ / / / / / / / / / / /|              |
*  write buffer:              | / / to be flushed / / |              |
*                             |/ / / / / / / / / / / /|              |
*                             +-----------------------+--------------+
*                               buf_ptr can be in this
*                               due to a backward seek
*
*                            pos
*               +-------------+----------------------------------------------+
*  output file: |             |                                              |
*               +-------------+----------------------------------------------+
*
*/
unsigned char *buffer; /**< Start of the buffer. */
int buffer_size; /**< Maximum buffer size */
unsigned char *buf_ptr; /**< Current position in the buffer */
unsigned char *buf_end; /**< End of the data, may be less than
buffer+buffer_size if the read function returned
less data than requested, e.g. for streams where
no more data has been received yet. */
void *opaque; /**< A private pointer, passed to the read/write/seek/...
functions. */
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
int64_t (*seek)(void *opaque, int64_t offset, int whence);
int64_t pos; /**< position in the file of the current buffer */
int eof_reached; /**< true if it was unable to read due to error or eof */
int write_flag; /**< true if open for writing */
int max_packet_size;
unsigned long checksum;
unsigned char *checksum_ptr;
unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);
int error; /**< contains the error code or 0 if no error happened */
/**
* Pause or resume playback for network streaming protocols - e.g. MMS.
*/
int (*read_pause)(void *opaque, int pause);
/**
* Seek to a given timestamp in stream with the specified stream_index.
* Needed for some network streaming protocols which don't support seeking
* to byte position.
*/
int64_t (*read_seek)(void *opaque, int stream_index,
int64_t timestamp, int flags);
/**
* A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
*/
int seekable;
/**
* max filesize, used to limit allocations
* This field is internal to libavformat and access from outside is not allowed.
*/
int64_t maxsize;
/**
* avio_read and avio_write should, if possible, be satisfied directly
* instead of going through a buffer, and avio_seek will always
* call the underlying seek function directly.
*/
int direct;
/**
* Bytes read statistic
* This field is internal to libavformat and access from outside is not allowed.
*/
int64_t bytes_read;
/**
* seek statistic
* This field is internal to libavformat and access from outside is not allowed.
*/
int seek_count;
/**
* writeout statistic
* This field is internal to libavformat and access from outside is not allowed.
*/
int writeout_count;
/**
* Original buffer size,
* used internally after probing to ensure seekback and to reset the buffer size.
* This field is internal to libavformat and access from outside is not allowed.
*/
int orig_buffer_size;
/**
* Threshold to favor readahead over seek.
* This is currently internal only; do not use it from outside.
*/
int short_seek_threshold;
/**
* ',' separated list of allowed protocols.
*/
const char *protocol_whitelist;
/**
* ',' separated list of disallowed protocols.
*/
const char *protocol_blacklist;
/**
* A callback that is used instead of write_packet.
*/
int (*write_data_type)(void *opaque, uint8_t *buf, int buf_size,
enum AVIODataMarkerType type, int64_t time);
/**
* If set, don't call write_data_type separately for AVIO_DATA_MARKER_BOUNDARY_POINT,
* but ignore them and treat them as AVIO_DATA_MARKER_UNKNOWN (to avoid needlessly
* small chunks of data returned from the callback).
*/
int ignore_boundary_point;
/**
* Internal, not meant to be used from outside of AVIOContext.
*/
enum AVIODataMarkerType current_type;
int64_t last_time;
/**
* A callback that is used instead of short_seek_threshold.
* This is currently internal only; do not use it from outside.
*/
int (*short_seek_get)(void *opaque);
int64_t written;
/**
* Maximum reached position before a backward seek in the write buffer,
* used to keep track of already written data for a later flush.
*/
unsigned char *buf_ptr_max;
/**
* Try to buffer at least this amount of data before flushing it
*/
int min_packet_size;
} AVIOContext;
/**
* Return the name of the protocol that will handle the passed URL.
*
* NULL is returned if no protocol could be found for the given URL.
*
* @return Name of the protocol or NULL.
*/
const char *avio_find_protocol_name(const char *url);
/**
* Return AVIO_FLAG_* access flags corresponding to the access permissions
* of the resource in url, or a negative value corresponding to an
* AVERROR code in case of failure. The returned access flags are
* masked by the value in flags.
*
* @note This function is intrinsically unsafe, in the sense that the
* checked resource may change its existence or permission status from
* one call to another. Thus you should not trust the returned value,
* unless you are sure that no other processes are accessing the
* checked resource.
*/
int avio_check(const char *url, int flags);
/**
* Move or rename a resource.
*
* @note url_src and url_dst should share the same protocol and authority.
*
* @param url_src url to resource to be moved
* @param url_dst new url to resource if the operation succeeded
* @return >=0 on success or negative on error.
*/
int avpriv_io_move(const char *url_src, const char *url_dst);
/**
* Delete a resource.
*
* @param url resource to be deleted.
* @return >=0 on success or negative on error.
*/
int avpriv_io_delete(const char *url);
/**
* Open directory for reading.
*
* @param s directory read context. Pointer to a NULL pointer must be passed.
* @param url directory to be listed.
* @param options A dictionary filled with protocol-private options. On return
* this parameter will be destroyed and replaced with a dictionary
* containing options that were not found. May be NULL.
* @return >=0 on success or negative on error.
*/
int avio_open_dir(AVIODirContext **s, const char *url, AVDictionary **options);
/**
* Get next directory entry.
*
* Returned entry must be freed with avio_free_directory_entry(). In particular
* it may outlive AVIODirContext.
*
* @param s directory read context.
* @param[out] next next entry or NULL when no more entries.
* @return >=0 on success or negative on error. End of list is not considered an
* error.
*/
int avio_read_dir(AVIODirContext *s, AVIODirEntry **next);
/**
* Close directory.
*
* @note Entries created using avio_read_dir() are not deleted and must be
* freed with avio_free_directory_entry().
*
* @param s directory read context.
* @return >=0 on success or negative on error.
*/
int avio_close_dir(AVIODirContext **s);
/**
* Free entry allocated by avio_read_dir().
*
* @param entry entry to be freed.
*/
void avio_free_directory_entry(AVIODirEntry **entry);
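
The four directory calls above form a small, self-contained listing API; a sketch of the usual open/read/free/close cycle follows. It is illustrative only: list_dir is not part of libavformat, and error handling is reduced to the essentials.

#include <inttypes.h>
#include <stdio.h>
#include <libavformat/avio.h>

static int list_dir(const char *url)
{
    AVIODirContext *dir = NULL;
    AVIODirEntry *entry = NULL;
    int ret = avio_open_dir(&dir, url, NULL);   /* no protocol-private options */

    if (ret < 0)
        return ret;
    while ((ret = avio_read_dir(dir, &entry)) >= 0 && entry) {
        printf("%s (%" PRId64 " bytes)\n", entry->name, entry->size);
        avio_free_directory_entry(&entry);      /* every entry must be freed */
    }
    avio_close_dir(&dir);
    return ret < 0 ? ret : 0;
}
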
/**
* Allocate and initialize an AVIOContext for buffered I/O. It must be later
* freed with avio_context_free().
*
* @param buffer Memory block for input/output operations via AVIOContext.
* The buffer must be allocated with av_malloc() and friends.
* It may be freed and replaced with a new buffer by libavformat.
* AVIOContext.buffer holds the buffer currently in use,
* which must be later freed with av_free().
* @param buffer_size The buffer size is very important for performance.
* For protocols with fixed blocksize it should be set to this blocksize.
* For others a typical size is a cache page, e.g. 4kb.
* @param write_flag Set to 1 if the buffer should be writable, 0 otherwise.
* @param opaque An opaque pointer to user-specific data.
* @param read_packet A function for refilling the buffer, may be NULL.
* For stream protocols, must never return 0 but rather
* a proper AVERROR code.
* @param write_packet A function for writing the buffer contents, may be NULL.
* The function must not change the input buffer's contents.
* @param seek A function for seeking to specified byte position, may be NULL.
*
* @return Allocated AVIOContext or NULL on failure.
*/
AVIOContext *avio_alloc_context(
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence));
/**
* Free the supplied IO context and everything associated with it.
*
* @param s Double pointer to the IO context. This function will write NULL
* into s.
*/
void avio_context_free(AVIOContext **s);
void avio_w8(AVIOContext *s, int b);
void avio_write(AVIOContext *s, const unsigned char *buf, int size);
void avio_wl64(AVIOContext *s, uint64_t val);
void avio_wb64(AVIOContext *s, uint64_t val);
void avio_wl32(AVIOContext *s, unsigned int val);
void avio_wb32(AVIOContext *s, unsigned int val);
void avio_wl24(AVIOContext *s, unsigned int val);
void avio_wb24(AVIOContext *s, unsigned int val);
void avio_wl16(AVIOContext *s, unsigned int val);
void avio_wb16(AVIOContext *s, unsigned int val);
/**
* Write a NULL-terminated string.
* @return number of bytes written.
*/
int avio_put_str(AVIOContext *s, const char *str);
/**
* Convert an UTF-8 string to UTF-16LE and write it.
* @param s the AVIOContext
* @param str NULL-terminated UTF-8 string
*
* @return number of bytes written.
*/
int avio_put_str16le(AVIOContext *s, const char *str);
/**
* Convert an UTF-8 string to UTF-16BE and write it.
* @param s the AVIOContext
* @param str NULL-terminated UTF-8 string
*
* @return number of bytes written.
*/
int avio_put_str16be(AVIOContext *s, const char *str);
/**
* Mark the written bytestream as a specific type.
*
* Zero-length ranges are omitted from the output.
*
* @param time the stream time the current bytestream pos corresponds to
* (in AV_TIME_BASE units), or AV_NOPTS_VALUE if unknown or not
* applicable
* @param type the kind of data written starting at the current pos
*/
void avio_write_marker(AVIOContext *s, int64_t time, enum AVIODataMarkerType type);
/**
* ORing this as the "whence" parameter to a seek function causes it to
* return the filesize without seeking anywhere. Supporting this is optional.
* If it is not supported then the seek function will return <0.
*/
#define AVSEEK_SIZE 0x10000
/**
* Passing this flag as the "whence" parameter to a seek function causes it to
* seek by any means (like reopening and linear reading) or other normally unreasonable
* means that can be extremely slow.
* This may be ignored by the seek code.
*/
#define AVSEEK_FORCE 0x20000
/**
* fseek() equivalent for AVIOContext.
* @return new position or AVERROR.
*/
int64_t avio_seek(AVIOContext *s, int64_t offset, int whence);
/**
* Skip given number of bytes forward
* @return new position or AVERROR.
*/
int64_t avio_skip(AVIOContext *s, int64_t offset);
/**
* ftell() equivalent for AVIOContext.
* @return position or AVERROR.
*/
static av_always_inline int64_t avio_tell(AVIOContext *s)
{
return avio_seek(s, 0, SEEK_CUR);
}
/**
* Get the filesize.
* @return filesize or AVERROR
*/
int64_t avio_size(AVIOContext *s);
/**
* Similar to feof() but also returns nonzero on read errors.
* @return non zero if and only if at end of file or a read error happened when reading.
*/
int avio_feof(AVIOContext *s);
/** @warning Writes up to 4 KiB per call */
int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3);
/**
* Force flushing of buffered data.
*
* For write streams, force the buffered data to be immediately written to the output,
* without waiting for the internal buffer to fill.
*
* For read streams, discard all currently buffered data, and advance the
* reported file position to that of the underlying stream. This does not
* read new data, and does not perform any seeks.
*/
void avio_flush(AVIOContext *s);
/**
* Read size bytes from AVIOContext into buf.
* @return number of bytes read or AVERROR
*/
int avio_read(AVIOContext *s, unsigned char *buf, int size);
/**
* Read size bytes from AVIOContext into buf. Unlike avio_read(), this is allowed
* to read fewer bytes than requested. The missing bytes can be read in the next
* call. This always tries to read at least 1 byte.
* Useful to reduce latency in certain cases.
* @return number of bytes read or AVERROR
*/
int avio_read_partial(AVIOContext *s, unsigned char *buf, int size);
/**
* @name Functions for reading from AVIOContext
* @{
*
* @note return 0 if EOF, so you cannot use it if EOF handling is
* necessary
*/
int avio_r8 (AVIOContext *s);
unsigned int avio_rl16(AVIOContext *s);
unsigned int avio_rl24(AVIOContext *s);
unsigned int avio_rl32(AVIOContext *s);
uint64_t avio_rl64(AVIOContext *s);
unsigned int avio_rb16(AVIOContext *s);
unsigned int avio_rb24(AVIOContext *s);
unsigned int avio_rb32(AVIOContext *s);
uint64_t avio_rb64(AVIOContext *s);
/**
* @}
*/
/**
* Read a string from pb into buf. The reading will terminate when either
* a NULL character was encountered, maxlen bytes have been read, or nothing
* more can be read from pb. The result is guaranteed to be NULL-terminated, it
* will be truncated if buf is too small.
* Note that the string is not interpreted or validated in any way, it
* might get truncated in the middle of a sequence for multi-byte encodings.
*
* @return number of bytes read (is always <= maxlen).
* If reading ends on EOF or error, the return value will be one more than
* bytes actually read.
*/
int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen);
/**
* Read a UTF-16 string from pb and convert it to UTF-8.
* The reading will terminate when either a null or invalid character was
* encountered or maxlen bytes have been read.
* @return number of bytes read (is always <= maxlen)
*/
int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen);
int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen);
/**
* @name URL open modes
* The flags argument to avio_open must be one of the following
* constants, optionally ORed with other flags.
* @{
*/
#define AVIO_FLAG_READ 1 /**< read-only */
#define AVIO_FLAG_WRITE 2 /**< write-only */
#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */
/**
* @}
*/
/**
* Use non-blocking mode.
* If this flag is set, operations on the context will return
* AVERROR(EAGAIN) if they can not be performed immediately.
* If this flag is not set, operations on the context will never return
* AVERROR(EAGAIN).
* Note that this flag does not affect the opening/connecting of the
* context. Connecting a protocol will always block if necessary (e.g. on
* network protocols) but never hang (e.g. on busy devices).
* Warning: non-blocking protocol support is a work in progress; this flag may be
* silently ignored.
*/
#define AVIO_FLAG_NONBLOCK 8
/**
* Use direct mode.
* avio_read and avio_write should, if possible, be satisfied directly
* instead of going through a buffer, and avio_seek will always
* call the underlying seek function directly.
*/
#define AVIO_FLAG_DIRECT 0x8000
/**
* Create and initialize an AVIOContext for accessing the
* resource indicated by url.
* @note When the resource indicated by url has been opened in
* read+write mode, the AVIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created AVIOContext.
* In case of failure the pointed to value is set to NULL.
* @param url resource to access
* @param flags flags which control how the resource indicated by url
* is to be opened
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int avio_open(AVIOContext **s, const char *url, int flags);
/**
* Create and initialize an AVIOContext for accessing the
* resource indicated by url.
* @note When the resource indicated by url has been opened in
* read+write mode, the AVIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created AVIOContext.
* In case of failure the pointed to value is set to NULL.
* @param url resource to access
* @param flags flags which control how the resource indicated by url
* is to be opened
* @param int_cb an interrupt callback to be used at the protocol level
* @param options A dictionary filled with protocol-private options. On return
* this parameter will be destroyed and replaced with a dict containing options
* that were not found. May be NULL.
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int avio_open2(AVIOContext **s, const char *url, int flags,
const AVIOInterruptCB *int_cb, AVDictionary **options);
/**
* Close the resource accessed by the AVIOContext s and free it.
* This function can only be used if s was opened by avio_open().
*
* The internal buffer is automatically flushed before closing the
* resource.
*
* @return 0 on success, an AVERROR < 0 on error.
* @see avio_closep
*/
int avio_close(AVIOContext *s);
/**
* Close the resource accessed by the AVIOContext *s, free it
* and set the pointer pointing to it to NULL.
* This function can only be used if s was opened by avio_open().
*
* The internal buffer is automatically flushed before closing the
* resource.
*
* @return 0 on success, an AVERROR < 0 on error.
* @see avio_close
*/
int avio_closep(AVIOContext **s);
/**
* Open a write only memory stream.
*
* @param s new IO context
* @return zero if no error.
*/
int avio_open_dyn_buf(AVIOContext **s);
/**
* Return the written size and a pointer to the buffer.
* The AVIOContext stream is left intact.
* The buffer must NOT be freed.
* No padding is added to the buffer.
*
* @param s IO context
* @param pbuffer pointer to a byte buffer
* @return the length of the byte buffer
*/
int avio_get_dyn_buf(AVIOContext *s, uint8_t **pbuffer);
/**
* Return the written size and a pointer to the buffer. The buffer
* must be freed with av_free().
* Padding of AV_INPUT_BUFFER_PADDING_SIZE is added to the buffer.
*
* @param s IO context
* @param pbuffer pointer to a byte buffer
* @return the length of the byte buffer
*/
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer);
/**
* Iterate through names of available protocols.
*
* @param opaque A private pointer representing current protocol.
* It must be a pointer to NULL on first iteration and will
* be updated by successive calls to avio_enum_protocols.
* @param output If set to 1, iterate over output protocols,
* otherwise over input protocols.
*
* @return A static string containing the name of current protocol or NULL
*/
const char *avio_enum_protocols(void **opaque, int output);
/**
* Pause and resume playing - only meaningful if using a network streaming
* protocol (e.g. MMS).
*
* @param h IO context from which to call the read_pause function pointer
* @param pause 1 for pause, 0 for resume
*/
int avio_pause(AVIOContext *h, int pause);
/**
* Seek to a given timestamp relative to some component stream.
* Only meaningful if using a network streaming protocol (e.g. MMS.).
*
* @param h IO context from which to call the seek function pointers
* @param stream_index The stream index that the timestamp is relative to.
* If stream_index is (-1) the timestamp should be in AV_TIME_BASE
* units from the beginning of the presentation.
* If a stream_index >= 0 is used and the protocol does not support
* seeking based on component streams, the call will fail.
* @param timestamp timestamp in AVStream.time_base units
* or if there is no stream specified then in AV_TIME_BASE units.
* @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE
* and AVSEEK_FLAG_ANY. The protocol may silently ignore
* AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will
* fail if used and not supported.
* @return >= 0 on success
* @see AVInputFormat::read_seek
*/
int64_t avio_seek_time(AVIOContext *h, int stream_index,
int64_t timestamp, int flags);
/* Avoid a warning. The header can not be included because it breaks c++. */
struct AVBPrint;
/**
* Read contents of h into print buffer, up to max_size bytes, or up to EOF.
*
* @return 0 for success (max_size bytes read or EOF reached), negative error
* code otherwise
*/
int avio_read_to_bprint(AVIOContext *h, struct AVBPrint *pb, size_t max_size);
/**
* Accept and allocate a client context on a server context.
* @param s the server context
* @param c the client context, must be unallocated
* @return >= 0 on success or a negative value corresponding
* to an AVERROR on failure
*/
int avio_accept(AVIOContext *s, AVIOContext **c);
/**
* Perform one step of the protocol handshake to accept a new client.
* This function must be called on a client returned by avio_accept() before
* using it as a read/write context.
* It is separate from avio_accept() because it may block.
* A step of the handshake is defined by places where the application may
* decide to change the proceedings.
* For example, on a protocol with a request header and a reply header, each
* one can constitute a step because the application may use the parameters
* from the request to change parameters in the reply; or each individual
* chunk of the request can constitute a step.
* If the handshake is already finished, avio_handshake() does nothing and
* returns 0 immediately.
*
* @param c the client context to perform the handshake on
* @return 0 on a complete and successful handshake
* > 0 if the handshake progressed, but is not complete
* < 0 for an AVERROR code
*/
int avio_handshake(AVIOContext *c);
#endif /* AVFORMAT_AVIO_H */
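
To tie the callback-based part of the header together, here is a hedged sketch of custom I/O over an in-memory blob: a read callback plus avio_alloc_context(). MemReader, mem_read and open_mem_reader are illustrative names, not part of the API, and seeking/writing are deliberately left out.

#include <string.h>
#include <libavformat/avio.h>
#include <libavutil/mem.h>

typedef struct MemReader {
    const uint8_t *data;
    size_t size;
    size_t pos;
} MemReader;

/* Refill callback: copy up to buf_size bytes from the blob.
 * Returning AVERROR_EOF (never 0) signals end of data, as required above. */
static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
    MemReader *r = opaque;
    size_t left = r->size - r->pos;

    if (left == 0)
        return AVERROR_EOF;
    if ((size_t)buf_size > left)
        buf_size = (int)left;
    memcpy(buf, r->data + r->pos, buf_size);
    r->pos += buf_size;
    return buf_size;
}

static AVIOContext *open_mem_reader(MemReader *r)
{
    int buf_size = 4096;                      /* roughly one cache page, as suggested above */
    unsigned char *buf = av_malloc(buf_size); /* the buffer must come from av_malloc() */

    if (!buf)
        return NULL;
    return avio_alloc_context(buf, buf_size, 0 /* read-only */, r,
                              mem_read, NULL, NULL);
}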

View File

@ -0,0 +1,114 @@
/*
* Version macros.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_VERSION_H
#define AVFORMAT_VERSION_H
/**
* @file
* @ingroup libavf
* Libavformat version macros
*/
#include "libavutil/version.h"
// Major bumping may affect Ticket5467, 5421, 5451 (compatibility with Chromium)
// Also please add any ticket numbers that you believe might be affected here
#define LIBAVFORMAT_VERSION_MAJOR 58
#define LIBAVFORMAT_VERSION_MINOR 29
#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, \
LIBAVFORMAT_VERSION_MICRO)
#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, \
LIBAVFORMAT_VERSION_MICRO)
#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT
#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*
*/
#ifndef FF_API_COMPUTE_PKT_FIELDS2
#define FF_API_COMPUTE_PKT_FIELDS2 (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OLD_OPEN_CALLBACKS
#define FF_API_OLD_OPEN_CALLBACKS (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LAVF_AVCTX
#define FF_API_LAVF_AVCTX (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_HTTP_USER_AGENT
#define FF_API_HTTP_USER_AGENT (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_HLS_WRAP
#define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_HLS_USE_LOCALTIME
#define FF_API_HLS_USE_LOCALTIME (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LAVF_KEEPSIDE_FLAG
#define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OLD_ROTATE_API
#define FF_API_OLD_ROTATE_API (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_FORMAT_GET_SET
#define FF_API_FORMAT_GET_SET (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OLD_AVIO_EOF_0
#define FF_API_OLD_AVIO_EOF_0 (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LAVF_FFSERVER
#define FF_API_LAVF_FFSERVER (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_FORMAT_FILENAME
#define FF_API_FORMAT_FILENAME (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OLD_RTSP_OPTIONS
#define FF_API_OLD_RTSP_OPTIONS (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_NEXT
#define FF_API_NEXT (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_DASH_MIN_SEG_DURATION
#define FF_API_DASH_MIN_SEG_DURATION (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LAVF_MP4A_LATM
#define FF_API_LAVF_MP4A_LATM (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_AVIOFORMAT
#define FF_API_AVIOFORMAT (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_R_FRAME_RATE
#define FF_API_R_FRAME_RATE 1
#endif
#endif /* AVFORMAT_VERSION_H */
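
A common runtime counterpart to these compile-time macros is checking that the loaded library matches the headers the application was built against. A hedged sketch; avformat_version() is assumed from the main libavformat/avformat.h header, which is not shown in this diff.

#include <libavformat/avformat.h>
#include <libavformat/version.h>

static int lavf_major_matches(void)
{
    /* compare the run-time major version with the compile-time one */
    return AV_VERSION_MAJOR(avformat_version()) == LIBAVFORMAT_VERSION_MAJOR;
}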

View File

@ -0,0 +1,595 @@
/*
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVRESAMPLE_AVRESAMPLE_H
#define AVRESAMPLE_AVRESAMPLE_H
/**
* @file
* @ingroup lavr
* external API header
*/
/**
* @defgroup lavr libavresample
* @{
*
* Libavresample (lavr) is a library that handles audio resampling, sample
* format conversion and mixing.
*
* Interaction with lavr is done through AVAudioResampleContext, which is
* allocated with avresample_alloc_context(). It is opaque, so all parameters
* must be set with the @ref avoptions API.
*
* For example, the following code will set up conversion from planar float sample
* format to interleaved signed 16-bit integer, downsampling from 48kHz to
* 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing
* matrix):
* @code
* AVAudioResampleContext *avr = avresample_alloc_context();
* av_opt_set_int(avr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
* av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
* av_opt_set_int(avr, "in_sample_rate", 48000, 0);
* av_opt_set_int(avr, "out_sample_rate", 44100, 0);
* av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
* av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
* @endcode
*
* Once the context is initialized, it must be opened with avresample_open(). If
* you need to change the conversion parameters, you must close the context with
* avresample_close(), change the parameters as described above, then reopen it
* again.
*
* The conversion itself is done by repeatedly calling avresample_convert().
* Note that the samples may get buffered in two places in lavr. The first one
* is the output FIFO, where the samples end up if the output buffer is not
* large enough. The data stored in there may be retrieved at any time with
* avresample_read(). The second place is the resampling delay buffer,
* applicable only when resampling is done. The samples in it require more input
* before they can be processed. Their current amount is returned by
* avresample_get_delay(). At the end of conversion the resampling buffer can be
* flushed by calling avresample_convert() with NULL input.
*
* The following code demonstrates the conversion loop assuming the parameters
* from above and caller-defined functions get_input() and handle_output():
* @code
* uint8_t **input;
* int in_linesize, in_samples;
*
* while (get_input(&input, &in_linesize, &in_samples)) {
* uint8_t *output;
* int out_linesize;
* int out_samples = avresample_get_out_samples(avr, in_samples);
*
* av_samples_alloc(&output, &out_linesize, 2, out_samples,
* AV_SAMPLE_FMT_S16, 0);
* out_samples = avresample_convert(avr, &output, out_linesize, out_samples,
* input, in_linesize, in_samples);
* handle_output(output, out_linesize, out_samples);
* av_freep(&output);
* }
* @endcode
*
* When the conversion is finished and the FIFOs are flushed if required, the
* conversion context and everything associated with it must be freed with
* avresample_free().
*/
#include "libavutil/avutil.h"
#include "libavutil/channel_layout.h"
#include "libavutil/dict.h"
#include "libavutil/frame.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavresample/version.h"
#define AVRESAMPLE_MAX_CHANNELS 32
typedef struct AVAudioResampleContext AVAudioResampleContext;
/**
* @deprecated use libswresample
*
* Mixing Coefficient Types */
enum attribute_deprecated AVMixCoeffType {
AV_MIX_COEFF_TYPE_Q8, /** 16-bit 8.8 fixed-point */
AV_MIX_COEFF_TYPE_Q15, /** 32-bit 17.15 fixed-point */
AV_MIX_COEFF_TYPE_FLT, /** floating-point */
AV_MIX_COEFF_TYPE_NB, /** Number of coeff types. Not part of ABI */
};
/**
* @deprecated use libswresample
*
* Resampling Filter Types */
enum attribute_deprecated AVResampleFilterType {
AV_RESAMPLE_FILTER_TYPE_CUBIC, /**< Cubic */
AV_RESAMPLE_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */
AV_RESAMPLE_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */
};
/**
* @deprecated use libswresample
*/
enum attribute_deprecated AVResampleDitherMethod {
AV_RESAMPLE_DITHER_NONE, /**< Do not use dithering */
AV_RESAMPLE_DITHER_RECTANGULAR, /**< Rectangular Dither */
AV_RESAMPLE_DITHER_TRIANGULAR, /**< Triangular Dither*/
AV_RESAMPLE_DITHER_TRIANGULAR_HP, /**< Triangular Dither with High Pass */
AV_RESAMPLE_DITHER_TRIANGULAR_NS, /**< Triangular Dither with Noise Shaping */
AV_RESAMPLE_DITHER_NB, /**< Number of dither types. Not part of ABI. */
};
/**
*
* @deprecated use libswresample
*
* Return the LIBAVRESAMPLE_VERSION_INT constant.
*/
attribute_deprecated
unsigned avresample_version(void);
/**
*
* @deprecated use libswresample
*
* Return the libavresample build-time configuration.
* @return configure string
*/
attribute_deprecated
const char *avresample_configuration(void);
/**
*
* @deprecated use libswresample
*
* Return the libavresample license.
*/
attribute_deprecated
const char *avresample_license(void);
/**
*
* @deprecated use libswresample
*
* Get the AVClass for AVAudioResampleContext.
*
* Can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options
* without allocating a context.
*
* @see av_opt_find().
*
* @return AVClass for AVAudioResampleContext
*/
attribute_deprecated
const AVClass *avresample_get_class(void);
/**
*
* @deprecated use libswresample
*
* Allocate AVAudioResampleContext and set options.
*
* @return allocated audio resample context, or NULL on failure
*/
attribute_deprecated
AVAudioResampleContext *avresample_alloc_context(void);
/**
*
* @deprecated use libswresample
*
* Initialize AVAudioResampleContext.
* @note The context must be configured using the AVOption API.
* @note The fields "in_channel_layout", "out_channel_layout",
* "in_sample_rate", "out_sample_rate", "in_sample_fmt",
* "out_sample_fmt" must be set.
*
* @see av_opt_set_int()
* @see av_opt_set_dict()
* @see av_get_default_channel_layout()
*
* @param avr audio resample context
* @return 0 on success, negative AVERROR code on failure
*/
attribute_deprecated
int avresample_open(AVAudioResampleContext *avr);
/**
*
* @deprecated use libswresample
*
* Check whether an AVAudioResampleContext is open or closed.
*
* @param avr AVAudioResampleContext to check
* @return 1 if avr is open, 0 if avr is closed.
*/
attribute_deprecated
int avresample_is_open(AVAudioResampleContext *avr);
/**
*
* @deprecated use libswresample
*
* Close AVAudioResampleContext.
*
* This closes the context, but it does not change the parameters. The context
* can be reopened with avresample_open(). It does, however, clear the output
* FIFO and any remaining leftover samples in the resampling delay buffer. If
* there was a custom matrix being used, that is also cleared.
*
* @see avresample_convert()
* @see avresample_set_matrix()
*
* @param avr audio resample context
*/
attribute_deprecated
void avresample_close(AVAudioResampleContext *avr);
/**
*
* @deprecated use libswresample
*
* Free AVAudioResampleContext and associated AVOption values.
*
* This also calls avresample_close() before freeing.
*
* @param avr audio resample context
*/
attribute_deprecated
void avresample_free(AVAudioResampleContext **avr);
/**
*
* @deprecated use libswresample
*
* Generate a channel mixing matrix.
*
* This function is the one used internally by libavresample for building the
* default mixing matrix. It is made public just as a utility function for
* building custom matrices.
*
* @param in_layout input channel layout
* @param out_layout output channel layout
* @param center_mix_level mix level for the center channel
* @param surround_mix_level mix level for the surround channel(s)
* @param lfe_mix_level mix level for the low-frequency effects channel
* @param normalize if 1, coefficients will be normalized to prevent
* overflow. if 0, coefficients will not be
* normalized.
* @param[out] matrix mixing coefficients; matrix[i + stride * o] is
* the weight of input channel i in output channel o.
* @param stride distance between adjacent input channels in the
* matrix array
* @param matrix_encoding matrixed stereo downmix mode (e.g. dplii)
* @return 0 on success, negative AVERROR code on failure
*/
attribute_deprecated
int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
double center_mix_level, double surround_mix_level,
double lfe_mix_level, int normalize, double *matrix,
int stride, enum AVMatrixEncoding matrix_encoding);
/**
*
* @deprecated use libswresample
*
* Get the current channel mixing matrix.
*
* If no custom matrix has been previously set or the AVAudioResampleContext is
* not open, an error is returned.
*
* @param avr audio resample context
* @param matrix mixing coefficients; matrix[i + stride * o] is the weight of
* input channel i in output channel o.
* @param stride distance between adjacent input channels in the matrix array
* @return 0 on success, negative AVERROR code on failure
*/
attribute_deprecated
int avresample_get_matrix(AVAudioResampleContext *avr, double *matrix,
int stride);
/**
*
* @deprecated use libswresample
*
* Set channel mixing matrix.
*
* Allows for setting a custom mixing matrix, overriding the default matrix
* generated internally during avresample_open(). This function can be called
* anytime on an allocated context, either before or after calling
* avresample_open(), as long as the channel layouts have been set.
* avresample_convert() always uses the current matrix.
* Calling avresample_close() on the context will clear the current matrix.
*
* @see avresample_close()
*
* @param avr audio resample context
* @param matrix mixing coefficients; matrix[i + stride * o] is the weight of
* input channel i in output channel o.
* @param stride distance between adjacent input channels in the matrix array
* @return 0 on success, negative AVERROR code on failure
*/
attribute_deprecated
int avresample_set_matrix(AVAudioResampleContext *avr, const double *matrix,
int stride);
/**
*
* @deprecated use libswresample
*
* Set a customized input channel mapping.
*
* This function can only be called when the allocated context is not open.
* Also, the input channel layout must have already been set.
*
* Calling avresample_close() on the context will clear the channel mapping.
*
* The map for each input channel specifies the channel index in the source to
* use for that particular channel, or -1 to mute the channel. Source channels
* can be duplicated by using the same index for multiple input channels.
*
* Examples:
*
* Reordering 5.1 AAC order (C,L,R,Ls,Rs,LFE) to FFmpeg order (L,R,C,LFE,Ls,Rs):
* { 1, 2, 0, 5, 3, 4 }
*
* Muting the 3rd channel in 4-channel input:
* { 0, 1, -1, 3 }
*
* Duplicating the left channel of stereo input:
* { 0, 0 }
*
* @param avr audio resample context
* @param channel_map customized input channel mapping
* @return 0 on success, negative AVERROR code on failure
*/
attribute_deprecated
int avresample_set_channel_mapping(AVAudioResampleContext *avr,
const int *channel_map);
/**
*
* @deprecated use libswresample
*
* Set compensation for resampling.
*
* This can be called anytime after avresample_open(). If resampling is not
* automatically enabled because of a sample rate conversion, the
* "force_resampling" option must have been set to 1 when opening the context
* in order to use resampling compensation.
*
* @param avr audio resample context
* @param sample_delta compensation delta, in samples
* @param compensation_distance compensation distance, in samples
* @return 0 on success, negative AVERROR code on failure
*/
attribute_deprecated
int avresample_set_compensation(AVAudioResampleContext *avr, int sample_delta,
int compensation_distance);
/**
*
* @deprecated use libswresample
*
* Provide the upper bound on the number of samples the configured
* conversion would output.
*
* @param avr audio resample context
* @param in_nb_samples number of input samples
*
* @return number of samples or AVERROR(EINVAL) if the value
* would exceed INT_MAX
*/
attribute_deprecated
int avresample_get_out_samples(AVAudioResampleContext *avr, int in_nb_samples);
/**
*
* @deprecated use libswresample
*
* Convert input samples and write them to the output FIFO.
*
* The upper bound on the number of output samples can be obtained through
* avresample_get_out_samples().
*
* The output data can be NULL or have fewer allocated samples than required.
* In this case, any remaining samples not written to the output will be added
* to an internal FIFO buffer, to be returned at the next call to this function
* or to avresample_read().
*
* If converting sample rate, there may be data remaining in the internal
* resampling delay buffer. avresample_get_delay() tells the number of remaining
* samples. To get this data as output, call avresample_convert() with NULL
* input.
*
* At the end of the conversion process, there may be data remaining in the
* internal FIFO buffer. avresample_available() tells the number of remaining
* samples. To get this data as output, either call avresample_convert() with
* NULL input or call avresample_read().
*
* @see avresample_get_out_samples()
* @see avresample_read()
* @see avresample_get_delay()
*
* @param avr audio resample context
* @param output output data pointers
* @param out_plane_size output plane size, in bytes.
* This can be 0 if unknown, but that will lead to
* optimized functions not being used directly on the
* output, which could slow down some conversions.
* @param out_samples maximum number of samples that the output buffer can hold
* @param input input data pointers
* @param in_plane_size input plane size, in bytes
* This can be 0 if unknown, but that will lead to
* optimized functions not being used directly on the
* input, which could slow down some conversions.
* @param in_samples number of input samples to convert
* @return number of samples written to the output buffer,
* not including converted samples added to the internal
* output FIFO
*/
attribute_deprecated
int avresample_convert(AVAudioResampleContext *avr, uint8_t **output,
int out_plane_size, int out_samples,
uint8_t * const *input, int in_plane_size,
int in_samples);
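/*
 * A conversion-loop sketch for the behaviour described above, assuming an
 * opened context and caller-provided output/input pointers, plane sizes and
 * sample counts:
 *
 *     int done = avresample_convert(avr, output, out_plane_size, out_samples,
 *                                   input, in_plane_size, in_samples);
 *     if (done < 0) {
 *         // handle conversion error
 *     }
 *
 *     // at end of stream: flush the resampling delay buffer with NULL input,
 *     // then drain whatever is still queued in the output FIFO
 *     while (avresample_get_delay(avr))
 *         avresample_convert(avr, output, out_plane_size, out_samples,
 *                            NULL, 0, 0);
 *     while (avresample_available(avr))
 *         avresample_read(avr, output, out_samples);
 */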
/**
*
* @deprecated use libswresample
*
* Return the number of samples currently in the resampling delay buffer.
*
* When resampling, there may be a delay between the input and output. Any
* unconverted samples in each call are stored internally in a delay buffer.
* This function allows the user to determine the current number of samples in
* the delay buffer, which can be useful for synchronization.
*
* @see avresample_convert()
*
* @param avr audio resample context
* @return number of samples currently in the resampling delay buffer
*/
attribute_deprecated
int avresample_get_delay(AVAudioResampleContext *avr);
/**
*
* @deprecated use libswresample
*
* Return the number of available samples in the output FIFO.
*
* During conversion, if the user does not specify an output buffer or
* specifies an output buffer that is smaller than what is needed, remaining
* samples that are not written to the output are stored to an internal FIFO
* buffer. The samples in the FIFO can be read with avresample_read() or
* avresample_convert().
*
* @see avresample_read()
* @see avresample_convert()
*
* @param avr audio resample context
* @return number of samples available for reading
*/
attribute_deprecated
int avresample_available(AVAudioResampleContext *avr);
/**
*
* @deprecated use libswresample
*
* Read samples from the output FIFO.
*
* During conversion, if the user does not specify an output buffer or
* specifies an output buffer that is smaller than what is needed, remaining
* samples that are not written to the output are stored to an internal FIFO
* buffer. This function can be used to read samples from that internal FIFO.
*
* @see avresample_available()
* @see avresample_convert()
*
* @param avr audio resample context
* @param output output data pointers. May be NULL, in which case
* nb_samples of data is discarded from output FIFO.
* @param nb_samples number of samples to read from the FIFO
* @return the number of samples written to output
*/
attribute_deprecated
int avresample_read(AVAudioResampleContext *avr, uint8_t **output, int nb_samples);
/**
*
* @deprecated use libswresample
*
* Convert the samples in the input AVFrame and write them to the output AVFrame.
*
* Input and output AVFrames must have channel_layout, sample_rate and format set.
*
* The upper bound on the number of output samples is obtained through
* avresample_get_out_samples().
*
 * If the output AVFrame does not have its data pointers allocated, the
 * nb_samples field will be set using avresample_get_out_samples() and
 * av_frame_get_buffer() is called to allocate the frame.
*
* The output AVFrame can be NULL or have fewer allocated samples than required.
* In this case, any remaining samples not written to the output will be added
* to an internal FIFO buffer, to be returned at the next call to this function
* or to avresample_convert() or to avresample_read().
*
* If converting sample rate, there may be data remaining in the internal
* resampling delay buffer. avresample_get_delay() tells the number of
* remaining samples. To get this data as output, call this function or
* avresample_convert() with NULL input.
*
* At the end of the conversion process, there may be data remaining in the
* internal FIFO buffer. avresample_available() tells the number of remaining
* samples. To get this data as output, either call this function or
* avresample_convert() with NULL input or call avresample_read().
*
 * If the AVAudioResampleContext configuration does not match the output and
 * input AVFrame settings, the conversion does not take place and, depending on
 * which AVFrame is not matching, AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED
 * or AVERROR_OUTPUT_CHANGED|AVERROR_INPUT_CHANGED is returned.
*
* @see avresample_get_out_samples()
* @see avresample_available()
* @see avresample_convert()
* @see avresample_read()
* @see avresample_get_delay()
*
* @param avr audio resample context
* @param output output AVFrame
* @param input input AVFrame
* @return 0 on success, AVERROR on failure or nonmatching
* configuration.
*/
attribute_deprecated
int avresample_convert_frame(AVAudioResampleContext *avr,
AVFrame *output, AVFrame *input);
/**
*
* @deprecated use libswresample
*
* Configure or reconfigure the AVAudioResampleContext using the information
* provided by the AVFrames.
*
* The original resampling context is reset even on failure.
* The function calls avresample_close() internally if the context is open.
*
* @see avresample_open();
* @see avresample_close();
*
* @param avr audio resample context
* @param out output AVFrame
* @param in input AVFrame
* @return 0 on success, AVERROR on failure.
*/
attribute_deprecated
int avresample_config(AVAudioResampleContext *avr, AVFrame *out, AVFrame *in);
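/*
 * A frame-based sketch of avresample_convert_frame() and avresample_config():
 * try to convert, and if the frame parameters no longer match the context,
 * reconfigure, reopen and retry. The in/out AVFrames are assumed to have
 * channel_layout, sample_rate and format already set.
 *
 *     int ret = avresample_convert_frame(avr, out, in);
 *     if (ret == AVERROR_INPUT_CHANGED || ret == AVERROR_OUTPUT_CHANGED ||
 *         ret == (AVERROR_INPUT_CHANGED | AVERROR_OUTPUT_CHANGED)) {
 *         if (avresample_config(avr, out, in) == 0 &&
 *             avresample_open(avr) == 0)
 *             ret = avresample_convert_frame(avr, out, in);
 *     }
 */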
/**
* @}
*/
#endif /* AVRESAMPLE_AVRESAMPLE_H */

View File

@ -0,0 +1,50 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVRESAMPLE_VERSION_H
#define AVRESAMPLE_VERSION_H
/**
* @file
* @ingroup lavr
* Libavresample version macros.
*/
#include "libavutil/version.h"
#define LIBAVRESAMPLE_VERSION_MAJOR 4
#define LIBAVRESAMPLE_VERSION_MINOR 0
#define LIBAVRESAMPLE_VERSION_MICRO 0
#define LIBAVRESAMPLE_VERSION_INT AV_VERSION_INT(LIBAVRESAMPLE_VERSION_MAJOR, \
LIBAVRESAMPLE_VERSION_MINOR, \
LIBAVRESAMPLE_VERSION_MICRO)
#define LIBAVRESAMPLE_VERSION AV_VERSION(LIBAVRESAMPLE_VERSION_MAJOR, \
LIBAVRESAMPLE_VERSION_MINOR, \
LIBAVRESAMPLE_VERSION_MICRO)
#define LIBAVRESAMPLE_BUILD LIBAVRESAMPLE_VERSION_INT
#define LIBAVRESAMPLE_IDENT "Lavr" AV_STRINGIFY(LIBAVRESAMPLE_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
#endif /* AVRESAMPLE_VERSION_H */

View File

@ -0,0 +1,60 @@
/*
* copyright (c) 2006 Mans Rullgard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @ingroup lavu_adler32
* Public header for Adler-32 hash function implementation.
*/
#ifndef AVUTIL_ADLER32_H
#define AVUTIL_ADLER32_H
#include <stdint.h>
#include "attributes.h"
/**
* @defgroup lavu_adler32 Adler-32
* @ingroup lavu_hash
* Adler-32 hash function implementation.
*
* @{
*/
/**
* Calculate the Adler32 checksum of a buffer.
*
* Passing the return value to a subsequent av_adler32_update() call
* allows the checksum of multiple buffers to be calculated as though
* they were concatenated.
*
* @param adler initial checksum value
* @param buf pointer to input buffer
* @param len size of input buffer
* @return updated checksum
*/
unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
unsigned int len) av_pure;
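/*
 * A chained-update sketch for the function above, using the conventional
 * Adler-32 seed of 1; buf1/buf2 and their lengths are assumed inputs:
 *
 *     unsigned long sum = av_adler32_update(1, buf1, len1);
 *     sum = av_adler32_update(sum, buf2, len2);
 *     // sum now equals the checksum of buf1 and buf2 concatenated
 */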
/**
* @}
*/
#endif /* AVUTIL_ADLER32_H */

View File

@ -0,0 +1,65 @@
/*
* copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_AES_H
#define AVUTIL_AES_H
#include <stdint.h>
#include "attributes.h"
#include "version.h"
/**
* @defgroup lavu_aes AES
* @ingroup lavu_crypto
* @{
*/
extern const int av_aes_size;
struct AVAES;
/**
* Allocate an AVAES context.
*/
struct AVAES *av_aes_alloc(void);
/**
* Initialize an AVAES context.
* @param key_bits 128, 192 or 256
* @param decrypt 0 for encryption, 1 for decryption
*/
int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt);
/**
* Encrypt or decrypt a buffer using a previously initialized context.
* @param count number of 16 byte blocks
* @param dst destination array, can be equal to src
* @param src source array, can be equal to dst
* @param iv initialization vector for CBC mode, if NULL then ECB will be used
* @param decrypt 0 for encryption, 1 for decryption
*/
void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
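/*
 * A CBC encryption sketch for the calls above; key, src, dst and the block
 * count are assumed caller-provided, av_free() comes from libavutil/mem.h and
 * the all-zero IV is illustrative only:
 *
 *     struct AVAES *aes = av_aes_alloc();
 *     uint8_t iv[16] = { 0 };
 *     if (aes && av_aes_init(aes, key, 128, 0) >= 0)
 *         av_aes_crypt(aes, dst, src, blocks, iv, 0);   // blocks of 16 bytes
 *     av_free(aes);
 */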
/**
* @}
*/
#endif /* AVUTIL_AES_H */

View File

@ -0,0 +1,88 @@
/*
* AES-CTR cipher
* Copyright (c) 2015 Eran Kornblau <erankor at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_AES_CTR_H
#define AVUTIL_AES_CTR_H
#include <stdint.h>
#include "attributes.h"
#include "version.h"
#define AES_CTR_KEY_SIZE (16)
#define AES_CTR_IV_SIZE (8)
struct AVAESCTR;
/**
* Allocate an AVAESCTR context.
*/
struct AVAESCTR *av_aes_ctr_alloc(void);
/**
* Initialize an AVAESCTR context.
* @param key encryption key, must have a length of AES_CTR_KEY_SIZE
*/
int av_aes_ctr_init(struct AVAESCTR *a, const uint8_t *key);
/**
* Release an AVAESCTR context.
*/
void av_aes_ctr_free(struct AVAESCTR *a);
/**
* Process a buffer using a previously initialized context.
* @param dst destination array, can be equal to src
* @param src source array, can be equal to dst
* @param size the size of src and dst
*/
void av_aes_ctr_crypt(struct AVAESCTR *a, uint8_t *dst, const uint8_t *src, int size);
/**
* Get the current iv
*/
const uint8_t* av_aes_ctr_get_iv(struct AVAESCTR *a);
/**
* Generate a random iv
*/
void av_aes_ctr_set_random_iv(struct AVAESCTR *a);
/**
* Forcefully change the 8-byte iv
*/
void av_aes_ctr_set_iv(struct AVAESCTR *a, const uint8_t* iv);
/**
* Forcefully change the "full" 16-byte iv, including the counter
*/
void av_aes_ctr_set_full_iv(struct AVAESCTR *a, const uint8_t* iv);
/**
* Increment the top 64 bit of the iv (performed after each frame)
*/
void av_aes_ctr_increment_iv(struct AVAESCTR *a);
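/*
 * A CTR-mode sketch combining the calls above; key (AES_CTR_KEY_SIZE bytes),
 * src, dst and size are assumed caller-provided:
 *
 *     struct AVAESCTR *ctr = av_aes_ctr_alloc();
 *     if (ctr && av_aes_ctr_init(ctr, key) >= 0) {
 *         av_aes_ctr_set_random_iv(ctr);
 *         av_aes_ctr_crypt(ctr, dst, src, size);  // same call decrypts with the same iv
 *     }
 *     av_aes_ctr_free(ctr);
 */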
/**
* @}
*/
#endif /* AVUTIL_AES_CTR_H */

View File

@ -0,0 +1,167 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Macro definitions for various function/variable attributes
*/
#ifndef AVUTIL_ATTRIBUTES_H
#define AVUTIL_ATTRIBUTES_H
#ifdef __GNUC__
# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
# define AV_GCC_VERSION_AT_MOST(x,y) (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y))
#else
# define AV_GCC_VERSION_AT_LEAST(x,y) 0
# define AV_GCC_VERSION_AT_MOST(x,y) 0
#endif
#ifndef av_always_inline
#if AV_GCC_VERSION_AT_LEAST(3,1)
# define av_always_inline __attribute__((always_inline)) inline
#elif defined(_MSC_VER)
# define av_always_inline __forceinline
#else
# define av_always_inline inline
#endif
#endif
#ifndef av_extern_inline
#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)
# define av_extern_inline extern inline
#else
# define av_extern_inline inline
#endif
#endif
#if AV_GCC_VERSION_AT_LEAST(3,4)
# define av_warn_unused_result __attribute__((warn_unused_result))
#else
# define av_warn_unused_result
#endif
#if AV_GCC_VERSION_AT_LEAST(3,1)
# define av_noinline __attribute__((noinline))
#elif defined(_MSC_VER)
# define av_noinline __declspec(noinline)
#else
# define av_noinline
#endif
#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__)
# define av_pure __attribute__((pure))
#else
# define av_pure
#endif
#if AV_GCC_VERSION_AT_LEAST(2,6) || defined(__clang__)
# define av_const __attribute__((const))
#else
# define av_const
#endif
#if AV_GCC_VERSION_AT_LEAST(4,3) || defined(__clang__)
# define av_cold __attribute__((cold))
#else
# define av_cold
#endif
#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)
# define av_flatten __attribute__((flatten))
#else
# define av_flatten
#endif
#if AV_GCC_VERSION_AT_LEAST(3,1)
# define attribute_deprecated __attribute__((deprecated))
#elif defined(_MSC_VER)
# define attribute_deprecated __declspec(deprecated)
#else
# define attribute_deprecated
#endif
/**
* Disable warnings about deprecated features
* This is useful for sections of code kept for backward compatibility and
* scheduled for removal.
*/
#ifndef AV_NOWARN_DEPRECATED
#if AV_GCC_VERSION_AT_LEAST(4,6)
# define AV_NOWARN_DEPRECATED(code) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
code \
_Pragma("GCC diagnostic pop")
#elif defined(_MSC_VER)
# define AV_NOWARN_DEPRECATED(code) \
__pragma(warning(push)) \
__pragma(warning(disable : 4996)) \
code; \
__pragma(warning(pop))
#else
# define AV_NOWARN_DEPRECATED(code) code
#endif
#endif
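/*
 * A usage sketch: silence the deprecation warning for one call to a function
 * marked attribute_deprecated, e.g. the libavresample API declared earlier in
 * these headers (avr is an assumed, already-allocated context):
 *
 *     AV_NOWARN_DEPRECATED(avresample_close(avr);)
 */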
#if defined(__GNUC__) || defined(__clang__)
# define av_unused __attribute__((unused))
#else
# define av_unused
#endif
/**
* Mark a variable as used and prevent the compiler from optimizing it
* away. This is useful for variables accessed only from inline
* assembler without the compiler being aware.
*/
#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__)
# define av_used __attribute__((used))
#else
# define av_used
#endif
#if AV_GCC_VERSION_AT_LEAST(3,3) || defined(__clang__)
# define av_alias __attribute__((may_alias))
#else
# define av_alias
#endif
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__INTEL_COMPILER)
# define av_uninit(x) x=x
#else
# define av_uninit(x) x
#endif
#if defined(__GNUC__) || defined(__clang__)
# define av_builtin_constant_p __builtin_constant_p
# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
#else
# define av_builtin_constant_p(x) 0
# define av_printf_format(fmtpos, attrpos)
#endif
#if AV_GCC_VERSION_AT_LEAST(2,5) || defined(__clang__)
# define av_noreturn __attribute__((noreturn))
#else
# define av_noreturn
#endif
#endif /* AVUTIL_ATTRIBUTES_H */

View File

@ -0,0 +1,187 @@
/*
* Audio FIFO
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio FIFO Buffer
*/
#ifndef AVUTIL_AUDIO_FIFO_H
#define AVUTIL_AUDIO_FIFO_H
#include "avutil.h"
#include "fifo.h"
#include "samplefmt.h"
/**
* @addtogroup lavu_audio
* @{
*
* @defgroup lavu_audiofifo Audio FIFO Buffer
* @{
*/
/**
* Context for an Audio FIFO Buffer.
*
* - Operates at the sample level rather than the byte level.
* - Supports multiple channels with either planar or packed sample format.
* - Automatic reallocation when writing to a full buffer.
*/
typedef struct AVAudioFifo AVAudioFifo;
/**
* Free an AVAudioFifo.
*
* @param af AVAudioFifo to free
*/
void av_audio_fifo_free(AVAudioFifo *af);
/**
* Allocate an AVAudioFifo.
*
* @param sample_fmt sample format
* @param channels number of channels
* @param nb_samples initial allocation size, in samples
* @return newly allocated AVAudioFifo, or NULL on error
*/
AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels,
int nb_samples);
/**
* Reallocate an AVAudioFifo.
*
* @param af AVAudioFifo to reallocate
* @param nb_samples new allocation size, in samples
* @return 0 if OK, or negative AVERROR code on failure
*/
av_warn_unused_result
int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);
/**
* Write data to an AVAudioFifo.
*
* The AVAudioFifo will be reallocated automatically if the available space
* is less than nb_samples.
*
* @see enum AVSampleFormat
* The documentation for AVSampleFormat describes the data layout.
*
* @param af AVAudioFifo to write to
* @param data audio data plane pointers
* @param nb_samples number of samples to write
* @return number of samples actually written, or negative AVERROR
* code on failure. If successful, the number of samples
* actually written will always be nb_samples.
*/
int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);
/**
* Peek data from an AVAudioFifo.
*
* @see enum AVSampleFormat
* The documentation for AVSampleFormat describes the data layout.
*
* @param af AVAudioFifo to read from
* @param data audio data plane pointers
* @param nb_samples number of samples to peek
 * @return number of samples actually peeked, or negative AVERROR code
 *         on failure. The number of samples actually peeked will not
* be greater than nb_samples, and will only be less than
* nb_samples if av_audio_fifo_size is less than nb_samples.
*/
int av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples);
/**
* Peek data from an AVAudioFifo.
*
* @see enum AVSampleFormat
* The documentation for AVSampleFormat describes the data layout.
*
* @param af AVAudioFifo to read from
* @param data audio data plane pointers
* @param nb_samples number of samples to peek
* @param offset offset from current read position
 * @return number of samples actually peeked, or negative AVERROR code
 *         on failure. The number of samples actually peeked will not
* be greater than nb_samples, and will only be less than
* nb_samples if av_audio_fifo_size is less than nb_samples.
*/
int av_audio_fifo_peek_at(AVAudioFifo *af, void **data, int nb_samples, int offset);
/**
* Read data from an AVAudioFifo.
*
* @see enum AVSampleFormat
* The documentation for AVSampleFormat describes the data layout.
*
* @param af AVAudioFifo to read from
* @param data audio data plane pointers
* @param nb_samples number of samples to read
* @return number of samples actually read, or negative AVERROR code
* on failure. The number of samples actually read will not
* be greater than nb_samples, and will only be less than
* nb_samples if av_audio_fifo_size is less than nb_samples.
*/
int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples);
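/*
 * A write-then-read sketch for the FIFO calls above, using planar float
 * stereo; in_planes/out_planes, in_samples and frame_size are assumed
 * caller-provided:
 *
 *     AVAudioFifo *af = av_audio_fifo_alloc(AV_SAMPLE_FMT_FLTP, 2, 1024);
 *     if (af) {
 *         av_audio_fifo_write(af, (void **)in_planes, in_samples); // grows if needed
 *         while (av_audio_fifo_size(af) >= frame_size)
 *             av_audio_fifo_read(af, (void **)out_planes, frame_size);
 *         av_audio_fifo_free(af);
 *     }
 */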
/**
* Drain data from an AVAudioFifo.
*
* Removes the data without reading it.
*
* @param af AVAudioFifo to drain
* @param nb_samples number of samples to drain
* @return 0 if OK, or negative AVERROR code on failure
*/
int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples);
/**
* Reset the AVAudioFifo buffer.
*
* This empties all data in the buffer.
*
* @param af AVAudioFifo to reset
*/
void av_audio_fifo_reset(AVAudioFifo *af);
/**
* Get the current number of samples in the AVAudioFifo available for reading.
*
* @param af the AVAudioFifo to query
* @return number of samples available for reading
*/
int av_audio_fifo_size(AVAudioFifo *af);
/**
* Get the current number of samples in the AVAudioFifo available for writing.
*
* @param af the AVAudioFifo to query
* @return number of samples available for writing
*/
int av_audio_fifo_space(AVAudioFifo *af);
/**
* @}
* @}
*/
#endif /* AVUTIL_AUDIO_FIFO_H */

View File

@ -0,0 +1,6 @@
#include "version.h"
#if FF_API_AUDIOCONVERT
#include "channel_layout.h"
#endif

View File

@ -0,0 +1,75 @@
/*
* copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* simple assert() macros that are a bit more flexible than ISO C assert().
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#ifndef AVUTIL_AVASSERT_H
#define AVUTIL_AVASSERT_H
#include <stdlib.h>
#include "avutil.h"
#include "log.h"
/**
* assert() equivalent, that is always enabled.
*/
#define av_assert0(cond) do { \
if (!(cond)) { \
av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \
AV_STRINGIFY(cond), __FILE__, __LINE__); \
abort(); \
} \
} while (0)
/**
 * assert() equivalent that does not lie in speed-critical code.
 * These asserts() can thus be enabled without fearing speed loss.
*/
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0
#define av_assert1(cond) av_assert0(cond)
#else
#define av_assert1(cond) ((void)0)
#endif
/**
 * assert() equivalent that does lie in speed-critical code.
*/
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
#define av_assert2(cond) av_assert0(cond)
#define av_assert2_fpu() av_assert0_fpu()
#else
#define av_assert2(cond) ((void)0)
#define av_assert2_fpu() ((void)0)
#endif
/**
* Assert that floating point operations can be executed.
*
* This will av_assert0() that the cpu is not in MMX state on X86
*/
void av_assert0_fpu(void);
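/*
 * A small sketch of the assert levels above: av_assert0() is always compiled
 * in, av_assert2() only when ASSERT_LEVEL > 1 is defined at build time
 * (check_args is a hypothetical helper):
 *
 *     static int check_args(const uint8_t *buf, int size)
 *     {
 *         av_assert0(buf);        // always checked, even in release builds
 *         av_assert2(size >= 0);  // hot-path check, disabled in normal builds
 *         return size;
 *     }
 */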
#endif /* AVUTIL_AVASSERT_H */

View File

@ -0,0 +1,6 @@
/* Generated by ffmpeg configure */
#ifndef AVUTIL_AVCONFIG_H
#define AVUTIL_AVCONFIG_H
#define AV_HAVE_BIGENDIAN 0
#define AV_HAVE_FAST_UNALIGNED 0
#endif /* AVUTIL_AVCONFIG_H */

View File

@ -0,0 +1,413 @@
/*
* Copyright (c) 2007 Mans Rullgard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_AVSTRING_H
#define AVUTIL_AVSTRING_H
#include <stddef.h>
#include <stdint.h>
#include "attributes.h"
/**
* @addtogroup lavu_string
* @{
*/
/**
* Return non-zero if pfx is a prefix of str. If it is, *ptr is set to
* the address of the first character in str after the prefix.
*
* @param str input string
* @param pfx prefix to test
* @param ptr updated if the prefix is matched inside str
* @return non-zero if the prefix matches, zero otherwise
*/
int av_strstart(const char *str, const char *pfx, const char **ptr);
/**
* Return non-zero if pfx is a prefix of str independent of case. If
* it is, *ptr is set to the address of the first character in str
* after the prefix.
*
* @param str input string
* @param pfx prefix to test
* @param ptr updated if the prefix is matched inside str
* @return non-zero if the prefix matches, zero otherwise
*/
int av_stristart(const char *str, const char *pfx, const char **ptr);
/**
* Locate the first case-independent occurrence in the string haystack
* of the string needle. A zero-length string needle is considered to
* match at the start of haystack.
*
* This function is a case-insensitive version of the standard strstr().
*
* @param haystack string to search in
* @param needle string to search for
* @return pointer to the located match within haystack
* or a null pointer if no match
*/
char *av_stristr(const char *haystack, const char *needle);
/**
* Locate the first occurrence of the string needle in the string haystack
* where not more than hay_length characters are searched. A zero-length
* string needle is considered to match at the start of haystack.
*
* This function is a length-limited version of the standard strstr().
*
* @param haystack string to search in
* @param needle string to search for
* @param hay_length length of string to search in
* @return pointer to the located match within haystack
* or a null pointer if no match
*/
char *av_strnstr(const char *haystack, const char *needle, size_t hay_length);
/**
* Copy the string src to dst, but no more than size - 1 bytes, and
* null-terminate dst.
*
* This function is the same as BSD strlcpy().
*
* @param dst destination buffer
* @param src source string
* @param size size of destination buffer
* @return the length of src
*
* @warning since the return value is the length of src, src absolutely
* _must_ be a properly 0-terminated string, otherwise this will read beyond
* the end of the buffer and possibly crash.
*/
size_t av_strlcpy(char *dst, const char *src, size_t size);
/**
* Append the string src to the string dst, but to a total length of
* no more than size - 1 bytes, and null-terminate dst.
*
* This function is similar to BSD strlcat(), but differs when
* size <= strlen(dst).
*
* @param dst destination buffer
* @param src source string
* @param size size of destination buffer
* @return the total length of src and dst
*
 * @warning since the return value uses the lengths of src and dst, these
 * absolutely _must_ be properly 0-terminated strings, otherwise this
* will read beyond the end of the buffer and possibly crash.
*/
size_t av_strlcat(char *dst, const char *src, size_t size);
/**
* Append output to a string, according to a format. Never write out of
* the destination buffer, and always put a terminating 0 within
* the buffer.
* @param dst destination buffer (string to which the output is
* appended)
* @param size total size of the destination buffer
* @param fmt printf-compatible format string, specifying how the
* following parameters are used
* @return the length of the string that would have been generated
* if enough space had been available
*/
size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);
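/*
 * A bounded string-building sketch with the three functions above; the buffer
 * size and contents are illustrative:
 *
 *     char buf[64];
 *     av_strlcpy(buf, "rate=", sizeof(buf));
 *     av_strlcatf(buf, sizeof(buf), "%d", 48000);
 *     av_strlcat(buf, " Hz", sizeof(buf));
 *     // buf now holds "rate=48000 Hz"; a longer result would be truncated safely
 */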
/**
 * Get the count of continuous non-zero chars starting from the beginning.
*
* @param len maximum number of characters to check in the string, that
* is the maximum value which is returned by the function
*/
static inline size_t av_strnlen(const char *s, size_t len)
{
size_t i;
for (i = 0; i < len && s[i]; i++)
;
return i;
}
/**
* Print arguments following specified format into a large enough auto
* allocated buffer. It is similar to GNU asprintf().
* @param fmt printf-compatible format string, specifying how the
* following parameters are used.
* @return the allocated string
* @note You have to free the string yourself with av_free().
*/
char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2);
/**
* Convert a number to an av_malloced string.
*/
char *av_d2str(double d);
/**
* Unescape the given string until a non escaped terminating char,
* and return the token corresponding to the unescaped string.
*
* The normal \ and ' escaping is supported. Leading and trailing
* whitespaces are removed, unless they are escaped with '\' or are
* enclosed between ''.
*
* @param buf the buffer to parse, buf will be updated to point to the
* terminating char
* @param term a 0-terminated list of terminating chars
* @return the malloced unescaped string, which must be av_freed by
* the user, NULL in case of allocation failure
*/
char *av_get_token(const char **buf, const char *term);
/**
* Split the string into several tokens which can be accessed by
* successive calls to av_strtok().
*
* A token is defined as a sequence of characters not belonging to the
* set specified in delim.
*
* On the first call to av_strtok(), s should point to the string to
* parse, and the value of saveptr is ignored. In subsequent calls, s
* should be NULL, and saveptr should be unchanged since the previous
* call.
*
* This function is similar to strtok_r() defined in POSIX.1.
*
* @param s the string to parse, may be NULL
* @param delim 0-terminated list of token delimiters, must be non-NULL
* @param saveptr user-provided pointer which points to stored
* information necessary for av_strtok() to continue scanning the same
* string. saveptr is updated to point to the next character after the
* first delimiter found, or to NULL if the string was terminated
* @return the found token, or NULL when no token is found
*/
char *av_strtok(char *s, const char *delim, char **saveptr);
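/*
 * A tokenizing sketch for the calling convention above; the input must be
 * writable because the string is modified in place:
 *
 *     char line[] = "a,b,,c";
 *     char *saveptr = NULL;
 *     char *tok = av_strtok(line, ",", &saveptr);
 *     while (tok) {
 *         // process tok: "a", then "b", then "c" (empty fields are skipped)
 *         tok = av_strtok(NULL, ",", &saveptr);
 *     }
 */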
/**
* Locale-independent conversion of ASCII isdigit.
*/
static inline av_const int av_isdigit(int c)
{
return c >= '0' && c <= '9';
}
/**
* Locale-independent conversion of ASCII isgraph.
*/
static inline av_const int av_isgraph(int c)
{
return c > 32 && c < 127;
}
/**
* Locale-independent conversion of ASCII isspace.
*/
static inline av_const int av_isspace(int c)
{
return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' ||
c == '\v';
}
/**
* Locale-independent conversion of ASCII characters to uppercase.
*/
static inline av_const int av_toupper(int c)
{
if (c >= 'a' && c <= 'z')
c ^= 0x20;
return c;
}
/**
* Locale-independent conversion of ASCII characters to lowercase.
*/
static inline av_const int av_tolower(int c)
{
if (c >= 'A' && c <= 'Z')
c ^= 0x20;
return c;
}
/**
* Locale-independent conversion of ASCII isxdigit.
*/
static inline av_const int av_isxdigit(int c)
{
c = av_tolower(c);
return av_isdigit(c) || (c >= 'a' && c <= 'f');
}
/**
* Locale-independent case-insensitive compare.
* @note This means only ASCII-range characters are case-insensitive
*/
int av_strcasecmp(const char *a, const char *b);
/**
* Locale-independent case-insensitive compare.
* @note This means only ASCII-range characters are case-insensitive
*/
int av_strncasecmp(const char *a, const char *b, size_t n);
/**
 * Locale-independent string replacement.
 * @note This means only ASCII-range characters are replaced.
*/
char *av_strireplace(const char *str, const char *from, const char *to);
/**
* Thread safe basename.
* @param path the path, on DOS both \ and / are considered separators.
* @return pointer to the basename substring.
*/
const char *av_basename(const char *path);
/**
* Thread safe dirname.
* @param path the path, on DOS both \ and / are considered separators.
* @return the path with the separator replaced by the string terminator or ".".
* @note the function may change the input string.
*/
const char *av_dirname(char *path);
/**
* Match instances of a name in a comma-separated list of names.
* List entries are checked from the start to the end of the names list,
* the first match ends further processing. If an entry prefixed with '-'
* matches, then 0 is returned. The "ALL" list entry is considered to
* match all names.
*
* @param name Name to look for.
* @param names List of names.
* @return 1 on match, 0 otherwise.
*/
int av_match_name(const char *name, const char *names);
/**
* Append path component to the existing path.
 * A path separator '/' is inserted between the two parts when needed.
 * The resulting string has to be freed with av_free().
* @param path base path
* @param component component to be appended
* @return new path or NULL on error.
*/
char *av_append_path_component(const char *path, const char *component);
enum AVEscapeMode {
AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode.
AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping.
AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping.
};
/**
* Consider spaces special and escape them even in the middle of the
* string.
*
* This is equivalent to adding the whitespace characters to the special
* characters lists, except it is guaranteed to use the exact same list
* of whitespace characters as the rest of libavutil.
*/
#define AV_ESCAPE_FLAG_WHITESPACE (1 << 0)
/**
* Escape only specified special characters.
* Without this flag, escape also any characters that may be considered
* special by av_get_token(), such as the single quote.
*/
#define AV_ESCAPE_FLAG_STRICT (1 << 1)
/**
* Escape string in src, and put the escaped string in an allocated
* string in *dst, which must be freed with av_free().
*
* @param dst pointer where an allocated string is put
* @param src string to escape, must be non-NULL
* @param special_chars string containing the special characters which
* need to be escaped, can be NULL
* @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros.
* Any unknown value for mode will be considered equivalent to
* AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without
* notice.
* @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros
* @return the length of the allocated string, or a negative error code in case of error
* @see av_bprint_escape()
*/
av_warn_unused_result
int av_escape(char **dst, const char *src, const char *special_chars,
enum AVEscapeMode mode, int flags);
#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES 1 ///< accept codepoints over 0x10FFFF
#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS 2 ///< accept non-characters - 0xFFFE and 0xFFFF
#define AV_UTF8_FLAG_ACCEPT_SURROGATES 4 ///< accept UTF-16 surrogates codes
#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML
#define AV_UTF8_FLAG_ACCEPT_ALL \
AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES
/**
* Read and decode a single UTF-8 code point (character) from the
* buffer in *buf, and update *buf to point to the next byte to
* decode.
*
* In case of an invalid byte sequence, the pointer will be updated to
* the next byte after the invalid sequence and the function will
* return an error code.
*
* Depending on the specified flags, the function will also fail in
* case the decoded code point does not belong to a valid range.
*
* @note For speed-relevant code a carefully implemented use of
* GET_UTF8() may be preferred.
*
* @param codep pointer used to return the parsed code in case of success.
* The value in *codep is set even in case the range check fails.
* @param bufp pointer to the address the first byte of the sequence
* to decode, updated by the function to point to the
* byte next after the decoded sequence
* @param buf_end pointer to the end of the buffer, points to the next
* byte past the last in the buffer. This is used to
* avoid buffer overreads (in case of an unfinished
* UTF-8 sequence towards the end of the buffer).
* @param flags a collection of AV_UTF8_FLAG_* flags
* @return >= 0 in case a sequence was successfully read, a negative
* value in case of invalid sequence
*/
av_warn_unused_result
int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end,
unsigned int flags);
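/*
 * A decoding-loop sketch for the function above, walking a byte buffer and
 * stopping at the first invalid sequence; buf and buf_size are assumed inputs:
 *
 *     const uint8_t *p   = buf;
 *     const uint8_t *end = buf + buf_size;
 *     int32_t cp;
 *     while (p < end) {
 *         if (av_utf8_decode(&cp, &p, end, 0) < 0)
 *             break;          // invalid sequence; p already points past it
 *         // use the code point in cp
 *     }
 */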
/**
* Check if a name is in a list.
* @returns 0 if not found, or the 1 based index where it has been found in the
* list.
*/
int av_match_list(const char *name, const char *list, char separator);
/**
* See libc sscanf manual for more information.
* Locale-independent sscanf implementation.
*/
int av_sscanf(const char *string, const char *format, ...);
/**
* @}
*/
#endif /* AVUTIL_AVSTRING_H */

View File

@ -0,0 +1,365 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_AVUTIL_H
#define AVUTIL_AVUTIL_H
/**
* @file
* @ingroup lavu
* Convenience header that includes @ref lavu "libavutil"'s core.
*/
/**
* @mainpage
*
* @section ffmpeg_intro Introduction
*
* This document describes the usage of the different libraries
* provided by FFmpeg.
*
* @li @ref libavc "libavcodec" encoding/decoding library
* @li @ref lavfi "libavfilter" graph-based frame editing library
* @li @ref libavf "libavformat" I/O and muxing/demuxing library
* @li @ref lavd "libavdevice" special devices muxing/demuxing library
* @li @ref lavu "libavutil" common utility library
* @li @ref lswr "libswresample" audio resampling, format conversion and mixing
* @li @ref lpp "libpostproc" post processing library
* @li @ref libsws "libswscale" color conversion and scaling library
*
* @section ffmpeg_versioning Versioning and compatibility
*
* Each of the FFmpeg libraries contains a version.h header, which defines a
* major, minor and micro version number with the
* <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version
* number is incremented with backward incompatible changes - e.g. removing
* parts of the public API, reordering public struct members, etc. The minor
* version number is incremented for backward compatible API changes or major
* new features - e.g. adding a new public function or a new decoder. The micro
* version number is incremented for smaller changes that a calling program
* might still want to check for - e.g. changing behavior in a previously
* unspecified situation.
*
* FFmpeg guarantees backward API and ABI compatibility for each library as long
* as its major version number is unchanged. This means that no public symbols
* will be removed or renamed. Types and names of the public struct members and
* values of public macros and enums will remain the same (unless they were
* explicitly declared as not part of the public API). Documented behavior will
* not change.
*
* In other words, any correct program that works with a given FFmpeg snapshot
* should work just as well without any changes with any later snapshot with the
* same major versions. This applies to both rebuilding the program against new
* FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program
* links against.
*
* However, new public symbols may be added and new members may be appended to
* public structs whose size is not part of public ABI (most public structs in
* FFmpeg). New macros and enum values may be added. Behavior in undocumented
* situations may change slightly (and be documented). All those are accompanied
* by an entry in doc/APIchanges and incrementing either the minor or micro
* version number.
*/
/**
* @defgroup lavu libavutil
* Common code shared across all FFmpeg libraries.
*
* @note
* libavutil is designed to be modular. In most cases, in order to use the
* functions provided by one component of libavutil you must explicitly include
* the specific header containing that feature. If you are only using
* media-related components, you could simply include libavutil/avutil.h, which
* brings in most of the "core" components.
*
* @{
*
* @defgroup lavu_crypto Crypto and Hashing
*
* @{
* @}
*
* @defgroup lavu_math Mathematics
* @{
*
* @}
*
* @defgroup lavu_string String Manipulation
*
* @{
*
* @}
*
* @defgroup lavu_mem Memory Management
*
* @{
*
* @}
*
* @defgroup lavu_data Data Structures
* @{
*
* @}
*
* @defgroup lavu_video Video related
*
* @{
*
* @}
*
* @defgroup lavu_audio Audio related
*
* @{
*
* @}
*
* @defgroup lavu_error Error Codes
*
* @{
*
* @}
*
* @defgroup lavu_log Logging Facility
*
* @{
*
* @}
*
* @defgroup lavu_misc Other
*
* @{
*
* @defgroup preproc_misc Preprocessor String Macros
*
* @{
*
* @}
*
* @defgroup version_utils Library Version Macros
*
* @{
*
* @}
*/
/**
* @addtogroup lavu_ver
* @{
*/
/**
* Return the LIBAVUTIL_VERSION_INT constant.
*/
unsigned avutil_version(void);
/**
* Return an informative version string. This usually is the actual release
* version number or a git commit description. This string has no fixed format
* and can change any time. It should never be parsed by code.
*/
const char *av_version_info(void);
/**
* Return the libavutil build-time configuration.
*/
const char *avutil_configuration(void);
/**
* Return the libavutil license.
*/
const char *avutil_license(void);
/**
* @}
*/
/**
* @addtogroup lavu_media Media Type
* @brief Media Type
*/
enum AVMediaType {
AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
AVMEDIA_TYPE_VIDEO,
AVMEDIA_TYPE_AUDIO,
AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
AVMEDIA_TYPE_SUBTITLE,
AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
AVMEDIA_TYPE_NB
};
/**
* Return a string describing the media_type enum, NULL if media_type
* is unknown.
*/
const char *av_get_media_type_string(enum AVMediaType media_type);
/**
* @defgroup lavu_const Constants
* @{
*
* @defgroup lavu_enc Encoding specific
*
 * @note those definitions should move to avcodec
* @{
*/
#define FF_LAMBDA_SHIFT 7
#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
#define FF_LAMBDA_MAX (256*128-1)
#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
/**
* @}
* @defgroup lavu_time Timestamp specific
*
* FFmpeg internal timebase and timestamp definitions
*
* @{
*/
/**
* @brief Undefined timestamp value
*
 * Usually reported by demuxers that work on containers that do not provide
* either pts or dts.
*/
#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000))
/**
* Internal time base represented as integer
*/
#define AV_TIME_BASE 1000000
/**
* Internal time base represented as fractional value
*/
#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
/**
* @}
* @}
* @defgroup lavu_picture Image related
*
* AVPicture types, pixel formats and basic image planes manipulation.
*
* @{
*/
enum AVPictureType {
AV_PICTURE_TYPE_NONE = 0, ///< Undefined
AV_PICTURE_TYPE_I, ///< Intra
AV_PICTURE_TYPE_P, ///< Predicted
AV_PICTURE_TYPE_B, ///< Bi-dir predicted
AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4
AV_PICTURE_TYPE_SI, ///< Switching Intra
AV_PICTURE_TYPE_SP, ///< Switching Predicted
AV_PICTURE_TYPE_BI, ///< BI type
};
/**
* Return a single letter to describe the given picture type
* pict_type.
*
 * @param[in] pict_type the picture type
 * @return a single character representing the picture type,
 *         '?' if pict_type is unknown
*/
char av_get_picture_type_char(enum AVPictureType pict_type);
/**
* @}
*/
#include "common.h"
#include "error.h"
#include "rational.h"
#include "version.h"
#include "macros.h"
#include "mathematics.h"
#include "log.h"
#include "pixfmt.h"
/**
* Return x default pointer in case p is NULL.
*/
static inline void *av_x_if_null(const void *p, const void *x)
{
return (void *)(intptr_t)(p ? p : x);
}
/**
* Compute the length of an integer list.
*
* @param elsize size in bytes of each list element (only 1, 2, 4 or 8)
* @param term list terminator (usually 0 or -1)
* @param list pointer to the list
* @return length of the list, in elements, not counting the terminator
*/
unsigned av_int_list_length_for_size(unsigned elsize,
const void *list, uint64_t term) av_pure;
/**
* Compute the length of an integer list.
*
* @param term list terminator (usually 0 or -1)
* @param list pointer to the list
* @return length of the list, in elements, not counting the terminator
*/
#define av_int_list_length(list, term) \
av_int_list_length_for_size(sizeof(*(list)), list, term)
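/*
 * A sketch of the helper above on a -1 terminated list; the list contents are
 * illustrative:
 *
 *     static const int fmts[] = { 0, 1, 2, -1 };
 *     unsigned n = av_int_list_length(fmts, -1);   // n == 3
 */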
/**
* Open a file using a UTF-8 filename.
* The API of this function matches POSIX fopen(), errors are returned through
* errno.
*/
FILE *av_fopen_utf8(const char *path, const char *mode);
/**
* Return the fractional representation of the internal time base.
*/
AVRational av_get_time_base_q(void);
#define AV_FOURCC_MAX_STRING_SIZE 32
#define av_fourcc2str(fourcc) av_fourcc_make_string((char[AV_FOURCC_MAX_STRING_SIZE]){0}, fourcc)
/**
* Fill the provided buffer with a string containing a FourCC (four-character
* code) representation.
*
* @param buf a buffer with size in bytes of at least AV_FOURCC_MAX_STRING_SIZE
* @param fourcc the fourcc to represent
* @return the buffer in input
*/
char *av_fourcc_make_string(char *buf, uint32_t fourcc);
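/*
 * A sketch of both forms above; MKTAG() comes from libavutil/common.h, which
 * this header already includes:
 *
 *     char buf[AV_FOURCC_MAX_STRING_SIZE];
 *     av_fourcc_make_string(buf, MKTAG('a', 'v', 'c', '1'));   // buf == "avc1"
 *     // or, using the temporary-buffer convenience macro:
 *     av_log(NULL, AV_LOG_INFO, "%s\n", av_fourcc2str(MKTAG('a', 'v', 'c', '1')));
 */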
/**
* @}
* @}
*/
#endif /* AVUTIL_AVUTIL_H */

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_BASE64_H
#define AVUTIL_BASE64_H
#include <stdint.h>
/**
* @defgroup lavu_base64 Base64
* @ingroup lavu_crypto
* @{
*/
/**
* Decode a base64-encoded string.
*
* @param out buffer for decoded data
* @param in null-terminated input string
* @param out_size size in bytes of the out buffer, must be at
* least 3/4 of the length of in, that is AV_BASE64_DECODE_SIZE(strlen(in))
* @return number of bytes written, or a negative value in case of
* invalid input
*/
int av_base64_decode(uint8_t *out, const char *in, int out_size);
/**
* Calculate the output size in bytes needed to decode a base64 string
* with length x to a data buffer.
*/
#define AV_BASE64_DECODE_SIZE(x) ((x) * 3LL / 4)
/**
* Encode data to base64 and null-terminate.
*
* @param out buffer for encoded data
* @param out_size size in bytes of the out buffer (including the
* null terminator), must be at least AV_BASE64_SIZE(in_size)
* @param in input buffer containing the data to encode
* @param in_size size in bytes of the in buffer
* @return out or NULL in case of error
*/
char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);
/**
* Calculate the output size needed to base64-encode x bytes to a
* null-terminated string.
*/
#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1)
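/*
 * A round-trip sketch using the two size macros above; the payload is
 * illustrative and error handling is omitted:
 *
 *     static const uint8_t data[5] = { 1, 2, 3, 4, 5 };
 *     char    enc[AV_BASE64_SIZE(5)];
 *     uint8_t dec[8];
 *     av_base64_encode(enc, sizeof(enc), data, sizeof(data));
 *     int n = av_base64_decode(dec, enc, sizeof(dec));   // n == 5 on success
 */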
/**
* @}
*/
#endif /* AVUTIL_BASE64_H */

View File

@ -0,0 +1,82 @@
/*
* Blowfish algorithm
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_BLOWFISH_H
#define AVUTIL_BLOWFISH_H
#include <stdint.h>
/**
* @defgroup lavu_blowfish Blowfish
* @ingroup lavu_crypto
* @{
*/
#define AV_BF_ROUNDS 16
typedef struct AVBlowfish {
uint32_t p[AV_BF_ROUNDS + 2];
uint32_t s[4][256];
} AVBlowfish;
/**
* Allocate an AVBlowfish context.
*/
AVBlowfish *av_blowfish_alloc(void);
/**
* Initialize an AVBlowfish context.
*
* @param ctx an AVBlowfish context
* @param key a key
* @param key_len length of the key
*/
void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len);
/**
* Encrypt or decrypt a buffer using a previously initialized context.
*
* @param ctx an AVBlowfish context
 * @param xl left four-byte half of the input to be encrypted
 * @param xr right four-byte half of the input to be encrypted
* @param decrypt 0 for encryption, 1 for decryption
*/
void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr,
int decrypt);
/**
* Encrypt or decrypt a buffer using a previously initialized context.
*
* @param ctx an AVBlowfish context
* @param dst destination array, can be equal to src
* @param src source array, can be equal to dst
* @param count number of 8 byte blocks
* @param iv initialization vector for CBC mode, if NULL ECB will be used
* @param decrypt 0 for encryption, 1 for decryption
*/
void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src,
int count, uint8_t *iv, int decrypt);
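/*
 * A CBC sketch for the calls above; key, key_len, src, dst and the block count
 * are assumed caller-provided, av_free() comes from libavutil/mem.h and the
 * all-zero IV is illustrative only:
 *
 *     AVBlowfish *bf = av_blowfish_alloc();
 *     uint8_t iv[8] = { 0 };
 *     if (bf) {
 *         av_blowfish_init(bf, key, key_len);
 *         av_blowfish_crypt(bf, dst, src, blocks, iv, 0);   // blocks of 8 bytes
 *     }
 *     av_free(bf);
 */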
/**
* @}
*/
#endif /* AVUTIL_BLOWFISH_H */

View File

@ -0,0 +1,219 @@
/*
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_BPRINT_H
#define AVUTIL_BPRINT_H
#include <stdarg.h>
#include "attributes.h"
#include "avstring.h"
/**
 * Define a structure with extra padding to a fixed size.
 * This helps ensure binary compatibility with future versions.
*/
#define FF_PAD_STRUCTURE(name, size, ...) \
struct ff_pad_helper_##name { __VA_ARGS__ }; \
typedef struct name { \
__VA_ARGS__ \
char reserved_padding[size - sizeof(struct ff_pad_helper_##name)]; \
} name;
/**
* Buffer to print data progressively
*
* The string buffer grows as necessary and is always 0-terminated.
* The content of the string is never accessed, and thus is
* encoding-agnostic and can even hold binary data.
*
 * Small buffers are kept in the structure itself, and thus require no
 * memory allocation at all (unless the contents of the buffer are needed
 * after the structure goes out of scope). This is almost as lightweight as
* declaring a local "char buf[512]".
*
* The length of the string can go beyond the allocated size: the buffer is
* then truncated, but the functions still keep account of the actual total
* length.
*
 * In other words, buf->len can be greater than buf->size and records the
 * total length of what would have been written to the buffer if there had
 * been enough memory.
 *
 * Append operations do not need to be tested for failure: if a memory
 * allocation fails, data stops being appended to the buffer, but the length
 * is still updated. This situation can be tested with
* av_bprint_is_complete().
*
* The size_max field determines several possible behaviours:
*
* size_max = -1 (= UINT_MAX) or any large value will let the buffer be
* reallocated as necessary, with an amortized linear cost.
*
* size_max = 0 prevents writing anything to the buffer: only the total
* length is computed. The write operations can then possibly be repeated in
* a buffer with exactly the necessary size
* (using size_init = size_max = len + 1).
*
* size_max = 1 is automatically replaced by the exact size available in the
* structure itself, thus ensuring no dynamic memory allocation. The
* internal buffer is large enough to hold a reasonable paragraph of text,
* such as the current paragraph.
*/
FF_PAD_STRUCTURE(AVBPrint, 1024,
char *str; /**< string so far */
unsigned len; /**< length so far */
unsigned size; /**< allocated memory */
unsigned size_max; /**< maximum allocated memory */
char reserved_internal_buffer[1];
)
/**
* Convenience macros for special values for av_bprint_init() size_max
* parameter.
*/
#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1)
#define AV_BPRINT_SIZE_AUTOMATIC 1
#define AV_BPRINT_SIZE_COUNT_ONLY 0
/**
* Init a print buffer.
*
* @param buf buffer to init
* @param size_init initial size (including the final 0)
* @param size_max maximum size;
* 0 means do not write anything, just count the length;
* 1 is replaced by the maximum value for automatic storage;
* any large value means that the internal buffer will be
* reallocated as needed up to that limit; -1 is converted to
* UINT_MAX, the largest limit possible.
* Check also AV_BPRINT_SIZE_* macros.
*/
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);
/**
* Init a print buffer using a pre-existing buffer.
*
* The buffer will not be reallocated.
*
* @param buf buffer structure to init
* @param buffer byte buffer to use for the string data
* @param size size of buffer
*/
void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size);
/**
* Append a formatted string to a print buffer.
*/
void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3);
/**
* Append a formatted string to a print buffer.
*/
void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg);
/**
* Append char c n times to a print buffer.
*/
void av_bprint_chars(AVBPrint *buf, char c, unsigned n);
/**
* Append data to a print buffer.
*
 * @param buf bprint buffer to use
 * @param data pointer to data
 * @param size size of data
*/
void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size);
struct tm;
/**
* Append a formatted date and time to a print buffer.
*
 * @param buf bprint buffer to use
 * @param fmt date and time format string, see strftime()
 * @param tm broken-down time structure to translate
*
* @note due to poor design of the standard strftime function, it may
* produce poor results if the format string expands to a very long text and
* the bprint buffer is near the limit stated by the size_max option.
*/
void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm);
/**
* Allocate bytes in the buffer for external use.
*
* @param[in] buf buffer structure
* @param[in] size required size
* @param[out] mem pointer to the memory area
* @param[out] actual_size size of the memory area after allocation;
* can be larger or smaller than size
*/
void av_bprint_get_buffer(AVBPrint *buf, unsigned size,
unsigned char **mem, unsigned *actual_size);
/**
* Reset the string to "" but keep internal allocated data.
*/
void av_bprint_clear(AVBPrint *buf);
/**
* Test if the print buffer is complete (not truncated).
*
* It may have been truncated due to a memory allocation failure
* or the size_max limit (compare size and size_max if necessary).
*/
static inline int av_bprint_is_complete(const AVBPrint *buf)
{
return buf->len < buf->size;
}
/**
* Finalize a print buffer.
*
* The print buffer can no longer be used afterwards,
* but the len and size fields are still valid.
*
 * @param[out] ret_str if not NULL, used to return a permanent copy of the
* buffer contents, or NULL if memory allocation fails;
* if NULL, the buffer is discarded and freed
* @return 0 for success or error code (probably AVERROR(ENOMEM))
*/
int av_bprint_finalize(AVBPrint *buf, char **ret_str);
/**
* Escape the content in src and append it to dstbuf.
*
* @param dstbuf already inited destination bprint buffer
* @param src string containing the text to escape
* @param special_chars string containing the special characters which
* need to be escaped, can be NULL
* @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros.
* Any unknown value for mode will be considered equivalent to
* AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without
* notice.
* @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros
*/
void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars,
enum AVEscapeMode mode, int flags);
#endif /* AVUTIL_BPRINT_H */
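A sketch of the usual lifecycle (init, append, finalize); freeing the returned copy with av_free() from libavutil/mem.h is an assumption of this example:

#include "libavutil/bprint.h"
#include "libavutil/mem.h"

static char *make_line(int frame, double pts)
{
    AVBPrint bp;
    char *out = NULL;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);   /* grow as needed */
    av_bprintf(&bp, "frame=%d pts=%.3f", frame, pts);
    av_bprint_chars(&bp, '\n', 1);

    if (!av_bprint_is_complete(&bp)) {                   /* an allocation failed somewhere */
        av_bprint_finalize(&bp, NULL);                   /* discard and free the buffer    */
        return NULL;
    }
    av_bprint_finalize(&bp, &out);                       /* out is later released with av_free() */
    return out;
}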

View File

@@ -0,0 +1,109 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* byte swapping routines
*/
#ifndef AVUTIL_BSWAP_H
#define AVUTIL_BSWAP_H
#include <stdint.h>
#include "libavutil/avconfig.h"
#include "attributes.h"
#ifdef HAVE_AV_CONFIG_H
#include "config.h"
#if ARCH_AARCH64
# include "aarch64/bswap.h"
#elif ARCH_ARM
# include "arm/bswap.h"
#elif ARCH_AVR32
# include "avr32/bswap.h"
#elif ARCH_SH4
# include "sh4/bswap.h"
#elif ARCH_X86
# include "x86/bswap.h"
#endif
#endif /* HAVE_AV_CONFIG_H */
#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff))
#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))
#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))
#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)
#ifndef av_bswap16
static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
{
x= (x>>8) | (x<<8);
return x;
}
#endif
#ifndef av_bswap32
static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
{
return AV_BSWAP32C(x);
}
#endif
#ifndef av_bswap64
static inline uint64_t av_const av_bswap64(uint64_t x)
{
return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);
}
#endif
// be2ne ... big-endian to native-endian
// le2ne ... little-endian to native-endian
#if AV_HAVE_BIGENDIAN
#define av_be2ne16(x) (x)
#define av_be2ne32(x) (x)
#define av_be2ne64(x) (x)
#define av_le2ne16(x) av_bswap16(x)
#define av_le2ne32(x) av_bswap32(x)
#define av_le2ne64(x) av_bswap64(x)
#define AV_BE2NEC(s, x) (x)
#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)
#else
#define av_be2ne16(x) av_bswap16(x)
#define av_be2ne32(x) av_bswap32(x)
#define av_be2ne64(x) av_bswap64(x)
#define av_le2ne16(x) (x)
#define av_le2ne32(x) (x)
#define av_le2ne64(x) (x)
#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)
#define AV_LE2NEC(s, x) (x)
#endif
#define AV_BE2NE16C(x) AV_BE2NEC(16, x)
#define AV_BE2NE32C(x) AV_BE2NEC(32, x)
#define AV_BE2NE64C(x) AV_BE2NEC(64, x)
#define AV_LE2NE16C(x) AV_LE2NEC(16, x)
#define AV_LE2NE32C(x) AV_LE2NEC(32, x)
#define AV_LE2NE64C(x) AV_LE2NEC(64, x)
#endif /* AVUTIL_BSWAP_H */
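For example, the le2ne helpers above make an endian-safe little-endian read a one-liner (a sketch; memcpy is used to avoid unaligned access):

#include <string.h>
#include <stdint.h>
#include "libavutil/bswap.h"

/* Read a little-endian uint32_t from an arbitrary byte position. */
static uint32_t read_le32(const uint8_t *p)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));   /* native-endian load */
    return av_le2ne32(v);       /* no-op on LE hosts, av_bswap32() on BE hosts */
}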

View File

@@ -0,0 +1,291 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @ingroup lavu_buffer
* refcounted data buffer API
*/
#ifndef AVUTIL_BUFFER_H
#define AVUTIL_BUFFER_H
#include <stdint.h>
/**
* @defgroup lavu_buffer AVBuffer
* @ingroup lavu_data
*
* @{
* AVBuffer is an API for reference-counted data buffers.
*
* There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer
* represents the data buffer itself; it is opaque and not meant to be accessed
* by the caller directly, but only through AVBufferRef. However, the caller may
* e.g. compare two AVBuffer pointers to check whether two different references
* are describing the same data buffer. AVBufferRef represents a single
* reference to an AVBuffer and it is the object that may be manipulated by the
* caller directly.
*
* There are two functions provided for creating a new AVBuffer with a single
* reference -- av_buffer_alloc() to just allocate a new buffer, and
* av_buffer_create() to wrap an existing array in an AVBuffer. From an existing
* reference, additional references may be created with av_buffer_ref().
* Use av_buffer_unref() to free a reference (this will automatically free the
* data once all the references are freed).
*
* The convention throughout this API and the rest of FFmpeg is such that the
* buffer is considered writable if there exists only one reference to it (and
* it has not been marked as read-only). The av_buffer_is_writable() function is
* provided to check whether this is true and av_buffer_make_writable() will
* automatically create a new writable buffer when necessary.
* Of course nothing prevents the calling code from violating this convention,
* however that is safe only when all the existing references are under its
* control.
*
* @note Referencing and unreferencing the buffers is thread-safe and thus
* may be done from multiple threads simultaneously without any need for
* additional locking.
*
* @note Two different references to the same buffer can point to different
* parts of the buffer (i.e. their AVBufferRef.data will not be equal).
*/
/**
* A reference counted buffer type. It is opaque and is meant to be used through
* references (AVBufferRef).
*/
typedef struct AVBuffer AVBuffer;
/**
* A reference to a data buffer.
*
* The size of this struct is not a part of the public ABI and it is not meant
* to be allocated directly.
*/
typedef struct AVBufferRef {
AVBuffer *buffer;
/**
* The data buffer. It is considered writable if and only if
* this is the only reference to the buffer, in which case
* av_buffer_is_writable() returns 1.
*/
uint8_t *data;
/**
* Size of data in bytes.
*/
int size;
} AVBufferRef;
/**
* Allocate an AVBuffer of the given size using av_malloc().
*
* @return an AVBufferRef of given size or NULL when out of memory
*/
AVBufferRef *av_buffer_alloc(int size);
/**
* Same as av_buffer_alloc(), except the returned buffer will be initialized
* to zero.
*/
AVBufferRef *av_buffer_allocz(int size);
/**
* Always treat the buffer as read-only, even when it has only one
* reference.
*/
#define AV_BUFFER_FLAG_READONLY (1 << 0)
/**
* Create an AVBuffer from an existing array.
*
* If this function is successful, data is owned by the AVBuffer. The caller may
* only access data through the returned AVBufferRef and references derived from
* it.
* If this function fails, data is left untouched.
* @param data data array
* @param size size of data in bytes
* @param free a callback for freeing this buffer's data
 * @param opaque an opaque parameter, retrievable with av_buffer_get_opaque() and passed to free
* @param flags a combination of AV_BUFFER_FLAG_*
*
* @return an AVBufferRef referring to data on success, NULL on failure.
*/
AVBufferRef *av_buffer_create(uint8_t *data, int size,
void (*free)(void *opaque, uint8_t *data),
void *opaque, int flags);
/**
* Default free callback, which calls av_free() on the buffer data.
* This function is meant to be passed to av_buffer_create(), not called
* directly.
*/
void av_buffer_default_free(void *opaque, uint8_t *data);
/**
* Create a new reference to an AVBuffer.
*
* @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
* failure.
*/
AVBufferRef *av_buffer_ref(AVBufferRef *buf);
/**
* Free a given reference and automatically free the buffer if there are no more
* references to it.
*
* @param buf the reference to be freed. The pointer is set to NULL on return.
*/
void av_buffer_unref(AVBufferRef **buf);
/**
* @return 1 if the caller may write to the data referred to by buf (which is
* true if and only if buf is the only reference to the underlying AVBuffer).
* Return 0 otherwise.
* A positive answer is valid until av_buffer_ref() is called on buf.
*/
int av_buffer_is_writable(const AVBufferRef *buf);
/**
* @return the opaque parameter set by av_buffer_create.
*/
void *av_buffer_get_opaque(const AVBufferRef *buf);
int av_buffer_get_ref_count(const AVBufferRef *buf);
/**
* Create a writable reference from a given buffer reference, avoiding data copy
* if possible.
*
* @param buf buffer reference to make writable. On success, buf is either left
* untouched, or it is unreferenced and a new writable AVBufferRef is
* written in its place. On failure, buf is left untouched.
* @return 0 on success, a negative AVERROR on failure.
*/
int av_buffer_make_writable(AVBufferRef **buf);
/**
* Reallocate a given buffer.
*
* @param buf a buffer reference to reallocate. On success, buf will be
* unreferenced and a new reference with the required size will be
* written in its place. On failure buf will be left untouched. *buf
* may be NULL, then a new buffer is allocated.
* @param size required new buffer size.
* @return 0 on success, a negative AVERROR on failure.
*
* @note the buffer is actually reallocated with av_realloc() only if it was
* initially allocated through av_buffer_realloc(NULL) and there is only one
* reference to it (i.e. the one passed to this function). In all other cases
* a new buffer is allocated and the data is copied.
*/
int av_buffer_realloc(AVBufferRef **buf, int size);
/**
* @}
*/
/**
* @defgroup lavu_bufferpool AVBufferPool
* @ingroup lavu_data
*
* @{
* AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.
*
* Frequently allocating and freeing large buffers may be slow. AVBufferPool is
* meant to solve this in cases when the caller needs a set of buffers of the
* same size (the most obvious use case being buffers for raw video or audio
* frames).
*
* At the beginning, the user must call av_buffer_pool_init() to create the
* buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to
* get a reference to a new buffer, similar to av_buffer_alloc(). This new
* reference works in all aspects the same way as the one created by
* av_buffer_alloc(). However, when the last reference to this buffer is
* unreferenced, it is returned to the pool instead of being freed and will be
* reused for subsequent av_buffer_pool_get() calls.
*
* When the caller is done with the pool and no longer needs to allocate any new
* buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.
* Once all the buffers are released, it will automatically be freed.
*
* Allocating and releasing buffers with this API is thread-safe as long as
* either the default alloc callback is used, or the user-supplied one is
* thread-safe.
*/
/**
* The buffer pool. This structure is opaque and not meant to be accessed
* directly. It is allocated with av_buffer_pool_init() and freed with
* av_buffer_pool_uninit().
*/
typedef struct AVBufferPool AVBufferPool;
/**
* Allocate and initialize a buffer pool.
*
* @param size size of each buffer in this pool
* @param alloc a function that will be used to allocate new buffers when the
* pool is empty. May be NULL, then the default allocator will be used
* (av_buffer_alloc()).
* @return newly created buffer pool on success, NULL on error.
*/
AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));
/**
* Allocate and initialize a buffer pool with a more complex allocator.
*
* @param size size of each buffer in this pool
* @param opaque arbitrary user data used by the allocator
* @param alloc a function that will be used to allocate new buffers when the
* pool is empty.
* @param pool_free a function that will be called immediately before the pool
* is freed. I.e. after av_buffer_pool_uninit() is called
* by the caller and all the frames are returned to the pool
* and freed. It is intended to uninitialize the user opaque
* data.
* @return newly created buffer pool on success, NULL on error.
*/
AVBufferPool *av_buffer_pool_init2(int size, void *opaque,
AVBufferRef* (*alloc)(void *opaque, int size),
void (*pool_free)(void *opaque));
/**
* Mark the pool as being available for freeing. It will actually be freed only
* once all the allocated buffers associated with the pool are released. Thus it
* is safe to call this function while some of the allocated buffers are still
* in use.
*
* @param pool pointer to the pool to be freed. It will be set to NULL.
*/
void av_buffer_pool_uninit(AVBufferPool **pool);
/**
* Allocate a new AVBuffer, reusing an old buffer from the pool when available.
* This function may be called simultaneously from multiple threads.
*
* @return a reference to the new buffer on success, NULL on error.
*/
AVBufferRef *av_buffer_pool_get(AVBufferPool *pool);
/**
* @}
*/
#endif /* AVUTIL_BUFFER_H */
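A sketch of the reference-counting flow described above: allocate, take a second reference, then drop both:

#include <string.h>
#include "libavutil/buffer.h"

static int buffer_demo(void)
{
    AVBufferRef *a = av_buffer_alloc(1024);
    AVBufferRef *b;

    if (!a)
        return -1;
    memset(a->data, 0, a->size);        /* sole reference: writing is allowed    */

    b = av_buffer_ref(a);               /* second reference to the same AVBuffer */
    if (!b) {
        av_buffer_unref(&a);
        return -1;
    }
    /* av_buffer_is_writable(a) is now 0: two references exist. */
    av_buffer_unref(&a);                /* drop one reference, data stays alive   */
    av_buffer_unref(&b);                /* last reference gone, data is freed     */
    return 0;
}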

View File

@@ -0,0 +1,70 @@
/*
* An implementation of the CAMELLIA algorithm as mentioned in RFC3713
* Copyright (c) 2014 Supraja Meedinti
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_CAMELLIA_H
#define AVUTIL_CAMELLIA_H
#include <stdint.h>
/**
* @file
* @brief Public header for libavutil CAMELLIA algorithm
* @defgroup lavu_camellia CAMELLIA
* @ingroup lavu_crypto
* @{
*/
extern const int av_camellia_size;
struct AVCAMELLIA;
/**
* Allocate an AVCAMELLIA context
* To free the struct: av_free(ptr)
*/
struct AVCAMELLIA *av_camellia_alloc(void);
/**
* Initialize an AVCAMELLIA context.
*
* @param ctx an AVCAMELLIA context
 * @param key a key of 16, 24 or 32 bytes used for encryption/decryption
 * @param key_bits number of key bits: possible values are 128, 192 and 256
*/
int av_camellia_init(struct AVCAMELLIA *ctx, const uint8_t *key, int key_bits);
/**
* Encrypt or decrypt a buffer using a previously initialized context
*
* @param ctx an AVCAMELLIA context
* @param dst destination array, can be equal to src
* @param src source array, can be equal to dst
* @param count number of 16 byte blocks
 * @param iv initialization vector for CBC mode, NULL for ECB mode
* @param decrypt 0 for encryption, 1 for decryption
*/
void av_camellia_crypt(struct AVCAMELLIA *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt);
/**
* @}
*/
#endif /* AVUTIL_CAMELLIA_H */
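A sketch of one-shot CBC encryption with the calls above; the all-zero key and IV are placeholder example values only, av_free() is the documented way to release the context, and treating a non-negative return from av_camellia_init() as success is an assumption:

#include <stdint.h>
#include "libavutil/camellia.h"
#include "libavutil/mem.h"

/* Encrypt `blocks` 16-byte blocks from src to dst with CAMELLIA in CBC mode. */
static int camellia_cbc_encrypt(uint8_t *dst, const uint8_t *src, int blocks)
{
    static const uint8_t key[16] = { 0 };   /* placeholder 128-bit key, example only */
    uint8_t iv[16] = { 0 };                 /* placeholder IV, example only          */
    struct AVCAMELLIA *ctx = av_camellia_alloc();
    int ret;

    if (!ctx)
        return -1;
    ret = av_camellia_init(ctx, key, 128);
    if (ret >= 0)
        av_camellia_crypt(ctx, dst, src, blocks, iv, 0);  /* iv != NULL -> CBC */
    av_free(ctx);
    return ret;
}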

View File

@@ -0,0 +1,80 @@
/*
* An implementation of the CAST128 algorithm as mentioned in RFC2144
* Copyright (c) 2014 Supraja Meedinti
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_CAST5_H
#define AVUTIL_CAST5_H
#include <stdint.h>
/**
* @file
* @brief Public header for libavutil CAST5 algorithm
* @defgroup lavu_cast5 CAST5
* @ingroup lavu_crypto
* @{
*/
extern const int av_cast5_size;
struct AVCAST5;
/**
* Allocate an AVCAST5 context
* To free the struct: av_free(ptr)
*/
struct AVCAST5 *av_cast5_alloc(void);
/**
* Initialize an AVCAST5 context.
*
* @param ctx an AVCAST5 context
 * @param key a key of 5, 6, ..., 16 bytes used for encryption/decryption
 * @param key_bits number of key bits: possible values are 40, 48, ..., 128
* @return 0 on success, less than 0 on failure
*/
int av_cast5_init(struct AVCAST5 *ctx, const uint8_t *key, int key_bits);
/**
* Encrypt or decrypt a buffer using a previously initialized context, ECB mode only
*
* @param ctx an AVCAST5 context
* @param dst destination array, can be equal to src
* @param src source array, can be equal to dst
* @param count number of 8 byte blocks
* @param decrypt 0 for encryption, 1 for decryption
*/
void av_cast5_crypt(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, int decrypt);
/**
* Encrypt or decrypt a buffer using a previously initialized context
*
* @param ctx an AVCAST5 context
* @param dst destination array, can be equal to src
* @param src source array, can be equal to dst
* @param count number of 8 byte blocks
* @param iv initialization vector for CBC mode, NULL for ECB mode
* @param decrypt 0 for encryption, 1 for decryption
*/
void av_cast5_crypt2(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
/**
* @}
*/
#endif /* AVUTIL_CAST5_H */

View File

@@ -0,0 +1,232 @@
/*
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2008 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_CHANNEL_LAYOUT_H
#define AVUTIL_CHANNEL_LAYOUT_H
#include <stdint.h>
/**
* @file
* audio channel layout utility functions
*/
/**
* @addtogroup lavu_audio
* @{
*/
/**
* @defgroup channel_masks Audio channel masks
*
 * A channel layout is a 64-bit integer with a bit set for every channel.
 * The number of bits set must be equal to the number of channels.
 * The value 0 means that the channel layout is not known.
 * @note this data structure is not powerful enough to handle channel
 * combinations that have the same channel multiple times, such as
 * dual-mono.
*
* @{
*/
#define AV_CH_FRONT_LEFT 0x00000001
#define AV_CH_FRONT_RIGHT 0x00000002
#define AV_CH_FRONT_CENTER 0x00000004
#define AV_CH_LOW_FREQUENCY 0x00000008
#define AV_CH_BACK_LEFT 0x00000010
#define AV_CH_BACK_RIGHT 0x00000020
#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
#define AV_CH_BACK_CENTER 0x00000100
#define AV_CH_SIDE_LEFT 0x00000200
#define AV_CH_SIDE_RIGHT 0x00000400
#define AV_CH_TOP_CENTER 0x00000800
#define AV_CH_TOP_FRONT_LEFT 0x00001000
#define AV_CH_TOP_FRONT_CENTER 0x00002000
#define AV_CH_TOP_FRONT_RIGHT 0x00004000
#define AV_CH_TOP_BACK_LEFT 0x00008000
#define AV_CH_TOP_BACK_CENTER 0x00010000
#define AV_CH_TOP_BACK_RIGHT 0x00020000
#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
#define AV_CH_WIDE_LEFT 0x0000000080000000ULL
#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL
#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL
#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL
#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL
/** Channel mask value used for AVCodecContext.request_channel_layout
to indicate that the user requests the channel order of the decoder output
to be the native codec channel order. */
#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
/**
* @}
* @defgroup channel_mask_c Audio channel layouts
* @{
* */
#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
enum AVMatrixEncoding {
AV_MATRIX_ENCODING_NONE,
AV_MATRIX_ENCODING_DOLBY,
AV_MATRIX_ENCODING_DPLII,
AV_MATRIX_ENCODING_DPLIIX,
AV_MATRIX_ENCODING_DPLIIZ,
AV_MATRIX_ENCODING_DOLBYEX,
AV_MATRIX_ENCODING_DOLBYHEADPHONE,
AV_MATRIX_ENCODING_NB
};
/**
* Return a channel layout id that matches name, or 0 if no match is found.
*
* name can be one or several of the following notations,
* separated by '+' or '|':
 * - the name of a standard channel layout (mono, stereo, 4.0, quad, 5.0,
* 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);
* - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,
* SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);
* - a number of channels, in decimal, followed by 'c', yielding
* the default channel layout for that number of channels (@see
* av_get_default_channel_layout);
* - a channel layout mask, in hexadecimal starting with "0x" (see the
* AV_CH_* macros).
*
* Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7"
*/
uint64_t av_get_channel_layout(const char *name);
/**
* Return a channel layout and the number of channels based on the specified name.
*
* This function is similar to (@see av_get_channel_layout), but can also parse
* unknown channel layout specifications.
*
* @param[in] name channel layout specification string
* @param[out] channel_layout parsed channel layout (0 if unknown)
* @param[out] nb_channels number of channels
*
* @return 0 on success, AVERROR(EINVAL) if the parsing fails.
*/
int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels);
/**
* Return a description of a channel layout.
* If nb_channels is <= 0, it is guessed from the channel_layout.
*
 * @param buf buffer in which to write the channel layout description
 * @param buf_size size in bytes of the buffer
*/
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
struct AVBPrint;
/**
* Append a description of a channel layout to a bprint buffer.
*/
void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);
/**
* Return the number of channels in the channel layout.
*/
int av_get_channel_layout_nb_channels(uint64_t channel_layout);
/**
* Return default channel layout for a given number of channels.
*/
int64_t av_get_default_channel_layout(int nb_channels);
/**
* Get the index of a channel in channel_layout.
*
* @param channel a channel layout describing exactly one channel which must be
* present in channel_layout.
*
* @return index of channel in channel_layout on success, a negative AVERROR
* on error.
*/
int av_get_channel_layout_channel_index(uint64_t channel_layout,
uint64_t channel);
/**
* Get the channel with the given index in channel_layout.
*/
uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
/**
* Get the name of a given channel.
*
* @return channel name on success, NULL on error.
*/
const char *av_get_channel_name(uint64_t channel);
/**
* Get the description of a given channel.
*
* @param channel a channel layout with a single channel
* @return channel description on success, NULL on error
*/
const char *av_get_channel_description(uint64_t channel);
/**
* Get the value and name of a standard channel layout.
*
* @param[in] index index in an internal list, starting at 0
* @param[out] layout channel layout mask
* @param[out] name name of the layout
* @return 0 if the layout exists,
* <0 if index is beyond the limits
*/
int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
const char **name);
/**
* @}
* @}
*/
#endif /* AVUTIL_CHANNEL_LAYOUT_H */
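For example, resolving a layout by name and printing its channels (a sketch; "5.1(side)" is one of the layout names listed above):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include "libavutil/channel_layout.h"

int main(void)
{
    uint64_t layout = av_get_channel_layout("5.1(side)");
    int      nb     = av_get_channel_layout_nb_channels(layout);
    char     desc[128];
    int      i;

    av_get_channel_layout_string(desc, sizeof(desc), nb, layout);
    printf("layout 0x%" PRIx64 " has %d channels: %s\n", layout, nb, desc);

    for (i = 0; i < nb; i++) {
        uint64_t ch = av_channel_layout_extract_channel(layout, i);
        printf("  %d: %s\n", i, av_get_channel_name(ch));
    }
    return 0;
}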

View File

@@ -0,0 +1,560 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* common internal and external API header
*/
#ifndef AVUTIL_COMMON_H
#define AVUTIL_COMMON_H
#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C)
#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS
#endif
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "attributes.h"
#include "macros.h"
#include "version.h"
#include "libavutil/avconfig.h"
#if AV_HAVE_BIGENDIAN
# define AV_NE(be, le) (be)
#else
# define AV_NE(be, le) (le)
#endif
//rounded division & shift
#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
/* assume b>0 */
#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */
#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
: ((a) + (1<<(b)) - 1) >> (b))
/* Backwards compat. */
#define FF_CEIL_RSHIFT AV_CEIL_RSHIFT
#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))
/**
 * Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior, as they
 * are not representable as absolute values of their type. This is the same
 * as with the standard *abs() functions.
* @see FFNABS()
*/
#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
#define FFSIGN(a) ((a) > 0 ? 1 : -1)
/**
 * Negative absolute value.
 * This works for all integers of all types.
 * As with many macros, this evaluates its argument twice; it thus must not have
 * a side effect, i.e. FFNABS(x++) has undefined behavior.
*/
#define FFNABS(a) ((a) <= 0 ? (a) : (-(a)))
/**
* Comparator.
* For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0
* if x == y. This is useful for instance in a qsort comparator callback.
* Furthermore, compilers are able to optimize this to branchless code, and
* there is no risk of overflow with signed types.
 * As with many macros, this evaluates its arguments multiple times; they thus
 * must not have side effects.
*/
#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y)))
#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
/* misc math functions */
#ifdef HAVE_AV_CONFIG_H
# include "config.h"
# include "intmath.h"
#endif
/* Pull in unguarded fallback defines at the end of this file. */
#include "common.h"
#ifndef av_log2
av_const int av_log2(unsigned v);
#endif
#ifndef av_log2_16bit
av_const int av_log2_16bit(unsigned v);
#endif
/**
* Clip a signed integer value into the amin-amax range.
* @param a value to clip
* @param amin minimum value of the clip range
* @param amax maximum value of the clip range
* @return clipped value
*/
static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
{
#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort();
#endif
if (a < amin) return amin;
else if (a > amax) return amax;
else return a;
}
/**
* Clip a signed 64bit integer value into the amin-amax range.
* @param a value to clip
* @param amin minimum value of the clip range
* @param amax maximum value of the clip range
* @return clipped value
*/
static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax)
{
#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort();
#endif
if (a < amin) return amin;
else if (a > amax) return amax;
else return a;
}
/**
* Clip a signed integer value into the 0-255 range.
* @param a value to clip
* @return clipped value
*/
static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
{
if (a&(~0xFF)) return (~a)>>31;
else return a;
}
/**
* Clip a signed integer value into the -128,127 range.
* @param a value to clip
* @return clipped value
*/
static av_always_inline av_const int8_t av_clip_int8_c(int a)
{
if ((a+0x80U) & ~0xFF) return (a>>31) ^ 0x7F;
else return a;
}
/**
* Clip a signed integer value into the 0-65535 range.
* @param a value to clip
* @return clipped value
*/
static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
{
if (a&(~0xFFFF)) return (~a)>>31;
else return a;
}
/**
* Clip a signed integer value into the -32768,32767 range.
* @param a value to clip
* @return clipped value
*/
static av_always_inline av_const int16_t av_clip_int16_c(int a)
{
if ((a+0x8000U) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
else return a;
}
/**
* Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
* @param a value to clip
* @return clipped value
*/
static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
{
if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF);
else return (int32_t)a;
}
/**
* Clip a signed integer into the -(2^p),(2^p-1) range.
* @param a value to clip
* @param p bit position to clip at
* @return clipped value
*/
static av_always_inline av_const int av_clip_intp2_c(int a, int p)
{
if (((unsigned)a + (1 << p)) & ~((2 << p) - 1))
return (a >> 31) ^ ((1 << p) - 1);
else
return a;
}
/**
* Clip a signed integer to an unsigned power of two range.
* @param a value to clip
* @param p bit position to clip at
* @return clipped value
*/
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
{
if (a & ~((1<<p) - 1)) return (~a) >> 31 & ((1<<p) - 1);
else return a;
}
/**
 * Clear the high bits of an unsigned integer, starting at a specific bit position
* @param a value to clip
* @param p bit position to clip at
* @return clipped value
*/
static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a, unsigned p)
{
return a & ((1 << p) - 1);
}
/**
* Add two signed 32-bit values with saturation.
*
* @param a one value
* @param b another value
* @return sum with signed saturation
*/
static av_always_inline int av_sat_add32_c(int a, int b)
{
return av_clipl_int32((int64_t)a + b);
}
/**
* Add a doubled value to another value with saturation at both stages.
*
* @param a first value
* @param b value doubled and added to a
* @return sum sat(a + sat(2*b)) with signed saturation
*/
static av_always_inline int av_sat_dadd32_c(int a, int b)
{
return av_sat_add32(a, av_sat_add32(b, b));
}
/**
* Subtract two signed 32-bit values with saturation.
*
* @param a one value
* @param b another value
* @return difference with signed saturation
*/
static av_always_inline int av_sat_sub32_c(int a, int b)
{
return av_clipl_int32((int64_t)a - b);
}
/**
* Subtract a doubled value from another value with saturation at both stages.
*
* @param a first value
* @param b value doubled and subtracted from a
* @return difference sat(a - sat(2*b)) with signed saturation
*/
static av_always_inline int av_sat_dsub32_c(int a, int b)
{
return av_sat_sub32(a, av_sat_add32(b, b));
}
/**
* Clip a float value into the amin-amax range.
* @param a value to clip
* @param amin minimum value of the clip range
* @param amax maximum value of the clip range
* @return clipped value
*/
static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
{
#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort();
#endif
if (a < amin) return amin;
else if (a > amax) return amax;
else return a;
}
/**
* Clip a double value into the amin-amax range.
* @param a value to clip
* @param amin minimum value of the clip range
* @param amax maximum value of the clip range
* @return clipped value
*/
static av_always_inline av_const double av_clipd_c(double a, double amin, double amax)
{
#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort();
#endif
if (a < amin) return amin;
else if (a > amax) return amax;
else return a;
}
/** Compute ceil(log2(x)).
* @param x value used to compute ceil(log2(x))
* @return computed ceiling of log2(x)
*/
static av_always_inline av_const int av_ceil_log2_c(int x)
{
return av_log2((x - 1) << 1);
}
/**
* Count number of bits set to one in x
* @param x value to count bits of
* @return the number of bits set to one in x
*/
static av_always_inline av_const int av_popcount_c(uint32_t x)
{
x -= (x >> 1) & 0x55555555;
x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
x = (x + (x >> 4)) & 0x0F0F0F0F;
x += x >> 8;
return (x + (x >> 16)) & 0x3F;
}
/**
* Count number of bits set to one in x
* @param x value to count bits of
* @return the number of bits set to one in x
*/
static av_always_inline av_const int av_popcount64_c(uint64_t x)
{
return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));
}
static av_always_inline av_const int av_parity_c(uint32_t v)
{
return av_popcount(v) & 1;
}
#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
/**
* Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
*
* @param val Output value, must be an lvalue of type uint32_t.
* @param GET_BYTE Expression reading one byte from the input.
* Evaluated up to 7 times (4 for the currently
* assigned Unicode range). With a memory buffer
* input, this could be *ptr++.
* @param ERROR Expression to be evaluated on invalid input,
* typically a goto statement.
*
* @warning ERROR should not contain a loop control statement which
* could interact with the internal while loop, and should force an
* exit from the macro code (e.g. through a goto or a return) in order
* to prevent undefined results.
*/
#define GET_UTF8(val, GET_BYTE, ERROR)\
val= (GET_BYTE);\
{\
uint32_t top = (val & 128) >> 1;\
if ((val & 0xc0) == 0x80 || val >= 0xFE)\
ERROR\
while (val & top) {\
int tmp= (GET_BYTE) - 128;\
if(tmp>>6)\
ERROR\
val= (val<<6) + tmp;\
top <<= 5;\
}\
val &= (top << 1) - 1;\
}
/**
* Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
*
* @param val Output value, must be an lvalue of type uint32_t.
* @param GET_16BIT Expression returning two bytes of UTF-16 data converted
* to native byte order. Evaluated one or two times.
* @param ERROR Expression to be evaluated on invalid input,
* typically a goto statement.
*/
#define GET_UTF16(val, GET_16BIT, ERROR)\
val = GET_16BIT;\
{\
unsigned int hi = val - 0xD800;\
if (hi < 0x800) {\
val = GET_16BIT - 0xDC00;\
if (val > 0x3FFU || hi > 0x3FFU)\
ERROR\
val += (hi<<10) + 0x10000;\
}\
}\
/**
* @def PUT_UTF8(val, tmp, PUT_BYTE)
* Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).
* @param val is an input-only argument and should be of type uint32_t. It holds
* a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
* val is given as a function it is executed only once.
* @param tmp is a temporary variable and should be of type uint8_t. It
* represents an intermediate value during conversion that is to be
* output by PUT_BYTE.
* @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
* It could be a function or a statement, and uses tmp as the input byte.
* For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be
* executed up to 4 times for values in the valid UTF-8 range and up to
* 7 times in the general case, depending on the length of the converted
* Unicode character.
*/
#define PUT_UTF8(val, tmp, PUT_BYTE)\
{\
int bytes, shift;\
uint32_t in = val;\
if (in < 0x80) {\
tmp = in;\
PUT_BYTE\
} else {\
bytes = (av_log2(in) + 4) / 5;\
shift = (bytes - 1) * 6;\
tmp = (256 - (256 >> bytes)) | (in >> shift);\
PUT_BYTE\
while (shift >= 6) {\
shift -= 6;\
tmp = 0x80 | ((in >> shift) & 0x3f);\
PUT_BYTE\
}\
}\
}
/**
* @def PUT_UTF16(val, tmp, PUT_16BIT)
* Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
* @param val is an input-only argument and should be of type uint32_t. It holds
* a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
* val is given as a function it is executed only once.
* @param tmp is a temporary variable and should be of type uint16_t. It
* represents an intermediate value during conversion that is to be
* output by PUT_16BIT.
* @param PUT_16BIT writes the converted UTF-16 data to any proper destination
 * in desired endianness. It could be a function or a statement, and uses tmp
 * as the input value. For example, PUT_16BIT could be "*output++ = tmp;"
 * PUT_16BIT will be executed 1 or 2 times depending on the input character.
*/
#define PUT_UTF16(val, tmp, PUT_16BIT)\
{\
uint32_t in = val;\
if (in < 0x10000) {\
tmp = in;\
PUT_16BIT\
} else {\
tmp = 0xD800 | ((in - 0x10000) >> 10);\
PUT_16BIT\
tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\
PUT_16BIT\
}\
}\
#include "mem.h"
#ifdef HAVE_AV_CONFIG_H
# include "internal.h"
#endif /* HAVE_AV_CONFIG_H */
#endif /* AVUTIL_COMMON_H */
/*
* The following definitions are outside the multiple inclusion guard
* to ensure they are immediately available in intmath.h.
*/
#ifndef av_ceil_log2
# define av_ceil_log2 av_ceil_log2_c
#endif
#ifndef av_clip
# define av_clip av_clip_c
#endif
#ifndef av_clip64
# define av_clip64 av_clip64_c
#endif
#ifndef av_clip_uint8
# define av_clip_uint8 av_clip_uint8_c
#endif
#ifndef av_clip_int8
# define av_clip_int8 av_clip_int8_c
#endif
#ifndef av_clip_uint16
# define av_clip_uint16 av_clip_uint16_c
#endif
#ifndef av_clip_int16
# define av_clip_int16 av_clip_int16_c
#endif
#ifndef av_clipl_int32
# define av_clipl_int32 av_clipl_int32_c
#endif
#ifndef av_clip_intp2
# define av_clip_intp2 av_clip_intp2_c
#endif
#ifndef av_clip_uintp2
# define av_clip_uintp2 av_clip_uintp2_c
#endif
#ifndef av_mod_uintp2
# define av_mod_uintp2 av_mod_uintp2_c
#endif
#ifndef av_sat_add32
# define av_sat_add32 av_sat_add32_c
#endif
#ifndef av_sat_dadd32
# define av_sat_dadd32 av_sat_dadd32_c
#endif
#ifndef av_sat_sub32
# define av_sat_sub32 av_sat_sub32_c
#endif
#ifndef av_sat_dsub32
# define av_sat_dsub32 av_sat_dsub32_c
#endif
#ifndef av_clipf
# define av_clipf av_clipf_c
#endif
#ifndef av_clipd
# define av_clipd av_clipd_c
#endif
#ifndef av_popcount
# define av_popcount av_popcount_c
#endif
#ifndef av_popcount64
# define av_popcount64 av_popcount64_c
#endif
#ifndef av_parity
# define av_parity av_parity_c
#endif
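Two small illustrations of the helpers above: a clip-based fixed-point gain, and a GET_UTF8 call whose ERROR argument is a statement that exits the function, as the documentation above requires (a sketch; the Q8 gain format is just an example):

#include <stdint.h>
#include "libavutil/common.h"

/* Scale an 8-bit sample by a Q8 fixed-point gain and clip back to 0..255. */
static uint8_t apply_gain(uint8_t sample, int gain_q8)
{
    return av_clip_uint8((sample * gain_q8 + 128) >> 8);
}

/* Decode the first UTF-8 code point of a NUL-terminated string, or return -1. */
static int64_t first_codepoint(const char *s)
{
    const uint8_t *p = (const uint8_t *)s;
    uint32_t cp;
    GET_UTF8(cp, *p++, return -1;)   /* the ERROR branch leaves the function */
    return cp;
}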

View File

@@ -0,0 +1,130 @@
/*
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_CPU_H
#define AVUTIL_CPU_H
#include <stddef.h>
#include "attributes.h"
#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
/* lower 16 bits - CPU features */
#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
///< than regular MMX/SSE (e.g. Core1)
#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
///< than regular MMX/SSE (e.g. Core1)
#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
#define AV_CPU_FLAG_SSSE3SLOW 0x4000000 ///< SSSE3 supported, but usually not faster
#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions
#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
#define AV_CPU_FLAG_AVXSLOW 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. Bulldozer)
#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction
#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used
#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions
#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1
#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2
#define AV_CPU_FLAG_AVX512 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used
#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06
#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07
#define AV_CPU_FLAG_ARMV5TE (1 << 0)
#define AV_CPU_FLAG_ARMV6 (1 << 1)
#define AV_CPU_FLAG_ARMV6T2 (1 << 2)
#define AV_CPU_FLAG_VFP (1 << 3)
#define AV_CPU_FLAG_VFPV3 (1 << 4)
#define AV_CPU_FLAG_NEON (1 << 5)
#define AV_CPU_FLAG_ARMV8 (1 << 6)
#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPU implementations
#define AV_CPU_FLAG_SETEND (1 <<16)
/**
* Return the flags which specify extensions supported by the CPU.
* The returned value is affected by av_force_cpu_flags() if that was used
* before. So av_get_cpu_flags() can easily be used in an application to
* detect the enabled cpu flags.
*/
int av_get_cpu_flags(void);
/**
* Disables cpu detection and forces the specified flags.
* -1 is a special case that disables forcing of specific flags.
*/
void av_force_cpu_flags(int flags);
/**
* Set a mask on flags returned by av_get_cpu_flags().
* This function is mainly useful for testing.
 * Please use av_force_cpu_flags() and av_get_cpu_flags() instead, which are more flexible.
*/
attribute_deprecated void av_set_cpu_flags_mask(int mask);
/**
* Parse CPU flags from a string.
*
* The returned flags contain the specified flags as well as related unspecified flags.
*
* This function exists only for compatibility with libav.
* Please use av_parse_cpu_caps() when possible.
* @return a combination of AV_CPU_* flags, negative on error.
*/
attribute_deprecated
int av_parse_cpu_flags(const char *s);
/**
* Parse CPU caps from a string and update the given AV_CPU_* flags based on that.
*
* @return negative on error.
*/
int av_parse_cpu_caps(unsigned *flags, const char *s);
/**
* @return the number of logical CPU cores present.
*/
int av_cpu_count(void);
/**
* Get the maximum data alignment that may be required by FFmpeg.
*
* Note that this is affected by the build configuration and the CPU flags mask,
* so e.g. if the CPU supports AVX, but libavutil has been built with
* --disable-avx or the AV_CPU_FLAG_AVX flag has been disabled through
* av_set_cpu_flags_mask(), then this function will behave as if AVX is not
* present.
*/
size_t av_cpu_max_align(void);
#endif /* AVUTIL_CPU_H */
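A typical runtime-dispatch sketch using the flags above (the chosen flags are x86 examples; other architectures use their own AV_CPU_FLAG_* values):

#include <stdio.h>
#include "libavutil/cpu.h"

int main(void)
{
    int flags = av_get_cpu_flags();

    printf("logical cores: %d, max alignment: %zu bytes\n",
           av_cpu_count(), av_cpu_max_align());

    /* Pick a code path based on the detected features. */
    if (flags & AV_CPU_FLAG_AVX2)
        printf("using AVX2 path\n");
    else if (flags & AV_CPU_FLAG_SSE2)
        printf("using SSE2 path\n");
    else
        printf("using plain C path\n");
    return 0;
}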

View File

@@ -0,0 +1,100 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @ingroup lavu_crc32
* Public header for CRC hash function implementation.
*/
#ifndef AVUTIL_CRC_H
#define AVUTIL_CRC_H
#include <stdint.h>
#include <stddef.h>
#include "attributes.h"
#include "version.h"
/**
* @defgroup lavu_crc32 CRC
* @ingroup lavu_hash
* CRC (Cyclic Redundancy Check) hash function implementation.
*
* This module supports numerous CRC polynomials, in addition to the most
* widely used CRC-32-IEEE. See @ref AVCRCId for a list of available
* polynomials.
*
* @{
*/
typedef uint32_t AVCRC;
typedef enum {
AV_CRC_8_ATM,
AV_CRC_16_ANSI,
AV_CRC_16_CCITT,
AV_CRC_32_IEEE,
AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */
AV_CRC_16_ANSI_LE, /*< reversed bitorder version of AV_CRC_16_ANSI */
AV_CRC_24_IEEE,
AV_CRC_8_EBU,
AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */
}AVCRCId;
/**
* Initialize a CRC table.
* @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024
* @param le If 1, the lowest bit represents the coefficient for the highest
* exponent of the corresponding polynomial (both for poly and
* actual CRC).
* If 0, you must swap the CRC parameter and the result of av_crc
* if you need the standard representation (can be simplified in
* most cases to e.g. bswap16):
* av_bswap32(crc << (32-bits))
* @param bits number of bits for the CRC
* @param poly generator polynomial without the x**bits coefficient, in the
* representation as specified by le
* @param ctx_size size of ctx in bytes
* @return <0 on failure
*/
int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
/**
* Get an initialized standard CRC table.
* @param crc_id ID of a standard CRC
* @return a pointer to the CRC table or NULL on failure
*/
const AVCRC *av_crc_get_table(AVCRCId crc_id);
/**
* Calculate the CRC of a block.
* @param crc CRC of previous blocks if any or initial value for CRC
* @return CRC updated with the data from the given block
*
* @see av_crc_init() "le" parameter
*/
uint32_t av_crc(const AVCRC *ctx, uint32_t crc,
const uint8_t *buffer, size_t length) av_pure;
/**
* @}
*/
#endif /* AVUTIL_CRC_H */
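A sketch of a standard CRC-32 over a memory block; the 0xFFFFFFFF pre- and post-conditioning here is an assumption added to match the common zlib-style CRC-32, it is not mandated by this header:

#include <stdint.h>
#include <stddef.h>
#include "libavutil/crc.h"

/* CRC-32 (IEEE polynomial, reflected/LE table) of a memory block. */
static uint32_t crc32_of(const uint8_t *buf, size_t len)
{
    const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE_LE);
    return av_crc(table, 0xFFFFFFFFu, buf, len) ^ 0xFFFFFFFFu;
}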

View File

@@ -0,0 +1,77 @@
/*
* DES encryption/decryption
* Copyright (c) 2007 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_DES_H
#define AVUTIL_DES_H
#include <stdint.h>
/**
* @defgroup lavu_des DES
* @ingroup lavu_crypto
* @{
*/
typedef struct AVDES {
uint64_t round_keys[3][16];
int triple_des;
} AVDES;
/**
* Allocate an AVDES context.
*/
AVDES *av_des_alloc(void);
/**
* @brief Initializes an AVDES context.
*
* @param key_bits must be 64 or 192
* @param decrypt 0 for encryption/CBC-MAC, 1 for decryption
* @return zero on success, negative value otherwise
*/
int av_des_init(struct AVDES *d, const uint8_t *key, int key_bits, int decrypt);
/**
* @brief Encrypts / decrypts using the DES algorithm.
*
* @param count number of 8 byte blocks
* @param dst destination array, can be equal to src, must be 8-byte aligned
* @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL
* @param iv initialization vector for CBC mode, if NULL then ECB will be used,
* must be 8-byte aligned
* @param decrypt 0 for encryption, 1 for decryption
*/
void av_des_crypt(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
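/**
 * Usage sketch (illustrative addition, not part of the original header):
 * single-DES ECB encryption of one 8-byte block in place. "key" and "block"
 * are hypothetical, suitably aligned buffers; av_free() comes from mem.h.
 * @code
 * AVDES *d = av_des_alloc();
 * if (d && av_des_init(d, key, 64, 0) == 0)
 *     av_des_crypt(d, block, block, 1, NULL, 0); // iv == NULL selects ECB
 * av_free(d);
 * @endcode
 */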
/**
* @brief Calculates CBC-MAC using the DES algorithm.
*
* @param count number of 8 byte blocks
* @param dst destination array, can be equal to src, must be 8-byte aligned
* @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL
*/
void av_des_mac(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count);
/**
* @}
*/
#endif /* AVUTIL_DES_H */

View File

@ -0,0 +1,200 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Public dictionary API.
* @deprecated
 * AVDictionary is provided for compatibility with libav. It is inefficient
 * both in implementation and in API. It does not scale and is
 * extremely slow with large dictionaries.
 * It is recommended that new code use our tree container from tree.c/h
 * where applicable, which uses AVL trees to achieve O(log n) performance.
*/
#ifndef AVUTIL_DICT_H
#define AVUTIL_DICT_H
#include <stdint.h>
#include "version.h"
/**
* @addtogroup lavu_dict AVDictionary
* @ingroup lavu_data
*
* @brief Simple key:value store
*
* @{
* Dictionaries are used for storing key:value pairs. To create
* an AVDictionary, simply pass an address of a NULL pointer to
* av_dict_set(). NULL can be used as an empty dictionary wherever
* a pointer to an AVDictionary is required.
* Use av_dict_get() to retrieve an entry or iterate over all
* entries and finally av_dict_free() to free the dictionary
* and all its contents.
*
@code
AVDictionary *d = NULL; // "create" an empty dictionary
AVDictionaryEntry *t = NULL;
av_dict_set(&d, "foo", "bar", 0); // add an entry
char *k = av_strdup("key"); // if your strings are already allocated,
char *v = av_strdup("value"); // you can avoid copying them like this
av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
 while ((t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX))) {
<....> // iterate over all entries in d
}
av_dict_free(&d);
@endcode
*/
#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */
#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key,
ignoring the suffix of the found key string. Only relevant in av_dict_get(). */
#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been
allocated with av_malloc() or another memory allocation function. */
#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been
allocated with av_malloc() or another memory allocation function. */
#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries.
#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
delimiter is added, the strings are simply concatenated. */
#define AV_DICT_MULTIKEY       64   /**< Allow storing multiple entries with the same key in the dictionary */
typedef struct AVDictionaryEntry {
char *key;
char *value;
} AVDictionaryEntry;
typedef struct AVDictionary AVDictionary;
/**
* Get a dictionary entry with matching key.
*
* The returned entry key or value must not be changed, or it will
* cause undefined behavior.
*
* To iterate through all the dictionary entries, you can set the matching key
* to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag.
*
* @param prev Set to the previous matching element to find the next.
* If set to NULL the first matching element is returned.
* @param key matching key
* @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved
* @return found entry or NULL in case no matching entry was found in the dictionary
*/
AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
const AVDictionaryEntry *prev, int flags);
/**
* Get number of entries in dictionary.
*
* @param m dictionary
* @return number of entries in dictionary
*/
int av_dict_count(const AVDictionary *m);
/**
* Set the given entry in *pm, overwriting an existing entry.
*
* Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
* these arguments will be freed on error.
*
* Warning: Adding a new entry to a dictionary invalidates all existing entries
* previously returned with av_dict_get.
*
* @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
* a dictionary struct is allocated and put in *pm.
* @param key entry key to add to *pm (will either be av_strduped or added as a new key depending on flags)
 * @param value entry value to add to *pm (will be av_strduped or added as-is depending on flags).
* Passing a NULL value will cause an existing entry to be deleted.
* @return >= 0 on success otherwise an error code <0
*/
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
/**
* Convenience wrapper for av_dict_set that converts the value to a string
* and stores it.
*
* Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
*/
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);
/**
* Parse the key/value pairs list and add the parsed entries to a dictionary.
*
* In case of failure, all the successfully set entries are stored in
* *pm. You may need to manually free the created dictionary.
*
* @param key_val_sep a 0-terminated list of characters used to separate
* key from value
* @param pairs_sep a 0-terminated list of characters used to separate
* two pairs from each other
* @param flags flags to use when adding to dictionary.
* AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL
* are ignored since the key/value tokens will always
* be duplicated.
* @return 0 on success, negative AVERROR code on failure
*/
int av_dict_parse_string(AVDictionary **pm, const char *str,
const char *key_val_sep, const char *pairs_sep,
int flags);
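/**
 * Usage sketch (illustrative addition, not part of the original header):
 * parsing a hypothetical "key=value:key=value" option string.
 * @code
 * AVDictionary *opts = NULL;
 * if (av_dict_parse_string(&opts, "threads=4:preset=fast", "=", ":", 0) >= 0) {
 *     // ... pass opts to an API that accepts an AVDictionary ...
 * }
 * av_dict_free(&opts);
 * @endcode
 */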
/**
* Copy entries from one AVDictionary struct into another.
* @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,
* this function will allocate a struct for you and put it in *dst
* @param src pointer to source AVDictionary struct
* @param flags flags to use when setting entries in *dst
* @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
* @return 0 on success, negative AVERROR code on failure. If dst was allocated
* by this function, callers should free the associated memory.
*/
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags);
/**
* Free all the memory allocated for an AVDictionary struct
* and all keys and values.
*/
void av_dict_free(AVDictionary **m);
/**
* Get dictionary entries as a string.
*
* Create a string containing dictionary's entries.
* Such string may be passed back to av_dict_parse_string().
* @note String is escaped with backslashes ('\').
*
* @param[in] m dictionary
 * @param[out] buffer Pointer to buffer that will be allocated with string containing entries.
 *                    Buffer must be freed by the caller when it is no longer needed.
* @param[in] key_val_sep character used to separate key from value
* @param[in] pairs_sep character used to separate two pairs from each other
* @return >= 0 on success, negative on error
 * @warning Separators cannot be '\\' or '\0'. They also cannot be the same.
*/
int av_dict_get_string(const AVDictionary *m, char **buffer,
const char key_val_sep, const char pairs_sep);
/**
* @}
*/
#endif /* AVUTIL_DICT_H */

View File

@ -0,0 +1,114 @@
/*
* Copyright (c) 2014 Vittorio Giovara <vittorio.giovara@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Display matrix
*/
#ifndef AVUTIL_DISPLAY_H
#define AVUTIL_DISPLAY_H
#include <stdint.h>
#include "common.h"
/**
* @addtogroup lavu_video
* @{
*
* @defgroup lavu_video_display Display transformation matrix functions
* @{
*/
/**
* @addtogroup lavu_video_display
* The display transformation matrix specifies an affine transformation that
* should be applied to video frames for correct presentation. It is compatible
* with the matrices stored in the ISO/IEC 14496-12 container format.
*
* The data is a 3x3 matrix represented as a 9-element array:
*
* @code{.unparsed}
* | a b u |
* (a, b, u, c, d, v, x, y, w) -> | c d v |
* | x y w |
* @endcode
*
* All numbers are stored in native endianness, as 16.16 fixed-point values,
* except for u, v and w, which are stored as 2.30 fixed-point values.
*
* The transformation maps a point (p, q) in the source (pre-transformation)
* frame to the point (p', q') in the destination (post-transformation) frame as
* follows:
*
* @code{.unparsed}
* | a b u |
* (p, q, 1) . | c d v | = z * (p', q', 1)
* | x y w |
* @endcode
*
* The transformation can also be more explicitly written in components as
* follows:
*
* @code{.unparsed}
* p' = (a * p + c * q + x) / z;
* q' = (b * p + d * q + y) / z;
* z = u * p + v * q + w
* @endcode
*/
/**
* Extract the rotation component of the transformation matrix.
*
* @param matrix the transformation matrix
* @return the angle (in degrees) by which the transformation rotates the frame
* counterclockwise. The angle will be in range [-180.0, 180.0],
* or NaN if the matrix is singular.
*
* @note floating point numbers are inherently inexact, so callers are
* recommended to round the return value to nearest integer before use.
*/
double av_display_rotation_get(const int32_t matrix[9]);
/**
* Initialize a transformation matrix describing a pure counterclockwise
* rotation by the specified angle (in degrees).
*
* @param matrix an allocated transformation matrix (will be fully overwritten
* by this function)
* @param angle rotation angle in degrees.
*/
void av_display_rotation_set(int32_t matrix[9], double angle);
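/**
 * Usage sketch (illustrative addition, not part of the original header):
 * building a 90-degree counterclockwise rotation matrix and reading the
 * angle back.
 * @code
 * int32_t m[9];
 * av_display_rotation_set(m, 90.0);
 * double angle = av_display_rotation_get(m); // approximately 90.0
 * @endcode
 */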
/**
* Flip the input matrix horizontally and/or vertically.
*
* @param matrix an allocated transformation matrix
* @param hflip whether the matrix should be flipped horizontally
* @param vflip whether the matrix should be flipped vertically
*/
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip);
/**
* @}
* @}
*/
#endif /* AVUTIL_DISPLAY_H */

View File

@ -0,0 +1,115 @@
/*
* Copyright (c) 2014 Tim Walker <tdskywalker@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_DOWNMIX_INFO_H
#define AVUTIL_DOWNMIX_INFO_H
#include "frame.h"
/**
* @file
 * audio downmix metadata
*/
/**
* @addtogroup lavu_audio
* @{
*/
/**
* @defgroup downmix_info Audio downmix metadata
* @{
*/
/**
* Possible downmix types.
*/
enum AVDownmixType {
AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */
AV_DOWNMIX_TYPE_LORO, /**< Lo/Ro 2-channel downmix (Stereo). */
AV_DOWNMIX_TYPE_LTRT, /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */
AV_DOWNMIX_TYPE_DPLII, /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */
AV_DOWNMIX_TYPE_NB /**< Number of downmix types. Not part of ABI. */
};
/**
* This structure describes optional metadata relevant to a downmix procedure.
*
* All fields are set by the decoder to the value indicated in the audio
* bitstream (if present), or to a "sane" default otherwise.
*/
typedef struct AVDownmixInfo {
/**
* Type of downmix preferred by the mastering engineer.
*/
enum AVDownmixType preferred_downmix_type;
/**
* Absolute scale factor representing the nominal level of the center
* channel during a regular downmix.
*/
double center_mix_level;
/**
* Absolute scale factor representing the nominal level of the center
* channel during an Lt/Rt compatible downmix.
*/
double center_mix_level_ltrt;
/**
* Absolute scale factor representing the nominal level of the surround
* channels during a regular downmix.
*/
double surround_mix_level;
/**
* Absolute scale factor representing the nominal level of the surround
* channels during an Lt/Rt compatible downmix.
*/
double surround_mix_level_ltrt;
/**
* Absolute scale factor representing the level at which the LFE data is
* mixed into L/R channels during downmixing.
*/
double lfe_mix_level;
} AVDownmixInfo;
/**
* Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing.
*
* If the side data is absent, it is created and added to the frame.
*
* @param frame the frame for which the side data is to be obtained or created
*
* @return the AVDownmixInfo structure to be edited by the caller, or NULL if
* the structure cannot be allocated.
*/
AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame);
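/**
 * Usage sketch (illustrative addition, not part of the original header):
 * attaching downmix metadata to a frame; "frame" is assumed to be a valid
 * AVFrame owned by the caller.
 * @code
 * AVDownmixInfo *dmix = av_downmix_info_update_side_data(frame);
 * if (dmix)
 *     dmix->preferred_downmix_type = AV_DOWNMIX_TYPE_LORO;
 * @endcode
 */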
/**
* @}
*/
/**
* @}
*/
#endif /* AVUTIL_DOWNMIX_INFO_H */

View File

@ -0,0 +1,205 @@
/**
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_ENCRYPTION_INFO_H
#define AVUTIL_ENCRYPTION_INFO_H
#include <stddef.h>
#include <stdint.h>
typedef struct AVSubsampleEncryptionInfo {
/** The number of bytes that are clear. */
unsigned int bytes_of_clear_data;
/**
* The number of bytes that are protected. If using pattern encryption,
* the pattern applies to only the protected bytes; if not using pattern
* encryption, all these bytes are encrypted.
*/
unsigned int bytes_of_protected_data;
} AVSubsampleEncryptionInfo;
/**
* This describes encryption info for a packet. This contains frame-specific
* info for how to decrypt the packet before passing it to the decoder.
*
* The size of this struct is not part of the public ABI.
*/
typedef struct AVEncryptionInfo {
/** The fourcc encryption scheme, in big-endian byte order. */
uint32_t scheme;
/**
* Only used for pattern encryption. This is the number of 16-byte blocks
* that are encrypted.
*/
uint32_t crypt_byte_block;
/**
* Only used for pattern encryption. This is the number of 16-byte blocks
* that are clear.
*/
uint32_t skip_byte_block;
/**
* The ID of the key used to encrypt the packet. This should always be
* 16 bytes long, but may be changed in the future.
*/
uint8_t *key_id;
uint32_t key_id_size;
/**
* The initialization vector. This may have been zero-filled to be the
* correct block size. This should always be 16 bytes long, but may be
* changed in the future.
*/
uint8_t *iv;
uint32_t iv_size;
/**
* An array of subsample encryption info specifying how parts of the sample
* are encrypted. If there are no subsamples, then the whole sample is
* encrypted.
*/
AVSubsampleEncryptionInfo *subsamples;
uint32_t subsample_count;
} AVEncryptionInfo;
/**
* This describes info used to initialize an encryption key system.
*
* The size of this struct is not part of the public ABI.
*/
typedef struct AVEncryptionInitInfo {
/**
* A unique identifier for the key system this is for, can be NULL if it
* is not known. This should always be 16 bytes, but may change in the
* future.
*/
uint8_t* system_id;
uint32_t system_id_size;
/**
* An array of key IDs this initialization data is for. All IDs are the
* same length. Can be NULL if there are no known key IDs.
*/
uint8_t** key_ids;
/** The number of key IDs. */
uint32_t num_key_ids;
/**
* The number of bytes in each key ID. This should always be 16, but may
* change in the future.
*/
uint32_t key_id_size;
/**
* Key-system specific initialization data. This data is copied directly
* from the file and the format depends on the specific key system. This
* can be NULL if there is no initialization data; in that case, there
* will be at least one key ID.
*/
uint8_t* data;
uint32_t data_size;
/**
* An optional pointer to the next initialization info in the list.
*/
struct AVEncryptionInitInfo *next;
} AVEncryptionInitInfo;
/**
* Allocates an AVEncryptionInfo structure and sub-pointers to hold the given
* number of subsamples. This will allocate pointers for the key ID, IV,
* and subsample entries, set the size members, and zero-initialize the rest.
*
* @param subsample_count The number of subsamples.
* @param key_id_size The number of bytes in the key ID, should be 16.
* @param iv_size The number of bytes in the IV, should be 16.
*
* @return The new AVEncryptionInfo structure, or NULL on error.
*/
AVEncryptionInfo *av_encryption_info_alloc(uint32_t subsample_count, uint32_t key_id_size, uint32_t iv_size);
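/**
 * Usage sketch (illustrative addition, not part of the original header):
 * allocating encryption info for a sample with two subsamples and the usual
 * 16-byte key ID and IV, then releasing it.
 * @code
 * AVEncryptionInfo *info = av_encryption_info_alloc(2, 16, 16);
 * if (info) {
 *     // ... fill info->key_id, info->iv and info->subsamples ...
 *     av_encryption_info_free(info);
 * }
 * @endcode
 */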
/**
* Allocates an AVEncryptionInfo structure with a copy of the given data.
* @return The new AVEncryptionInfo structure, or NULL on error.
*/
AVEncryptionInfo *av_encryption_info_clone(const AVEncryptionInfo *info);
/**
 * Frees the given encryption info object. This MUST NOT be used to free the
 * side-data data pointer; use the normal side-data methods for that.
*/
void av_encryption_info_free(AVEncryptionInfo *info);
/**
* Creates a copy of the AVEncryptionInfo that is contained in the given side
* data. The resulting object should be passed to av_encryption_info_free()
* when done.
*
* @return The new AVEncryptionInfo structure, or NULL on error.
*/
AVEncryptionInfo *av_encryption_info_get_side_data(const uint8_t *side_data, size_t side_data_size);
/**
* Allocates and initializes side data that holds a copy of the given encryption
* info. The resulting pointer should be either freed using av_free or given
* to av_packet_add_side_data().
*
* @return The new side-data pointer, or NULL.
*/
uint8_t *av_encryption_info_add_side_data(
const AVEncryptionInfo *info, size_t *side_data_size);
/**
* Allocates an AVEncryptionInitInfo structure and sub-pointers to hold the
* given sizes. This will allocate pointers and set all the fields.
*
* @return The new AVEncryptionInitInfo structure, or NULL on error.
*/
AVEncryptionInitInfo *av_encryption_init_info_alloc(
uint32_t system_id_size, uint32_t num_key_ids, uint32_t key_id_size, uint32_t data_size);
/**
 * Frees the given encryption init info object. This MUST NOT be used to free
 * the side-data data pointer; use the normal side-data methods for that.
*/
void av_encryption_init_info_free(AVEncryptionInitInfo* info);
/**
* Creates a copy of the AVEncryptionInitInfo that is contained in the given
* side data. The resulting object should be passed to
* av_encryption_init_info_free() when done.
*
* @return The new AVEncryptionInitInfo structure, or NULL on error.
*/
AVEncryptionInitInfo *av_encryption_init_info_get_side_data(
const uint8_t* side_data, size_t side_data_size);
/**
* Allocates and initializes side data that holds a copy of the given encryption
* init info. The resulting pointer should be either freed using av_free or
* given to av_packet_add_side_data().
*
* @return The new side-data pointer, or NULL.
*/
uint8_t *av_encryption_init_info_add_side_data(
const AVEncryptionInitInfo *info, size_t *side_data_size);
#endif /* AVUTIL_ENCRYPTION_INFO_H */

View File

@ -0,0 +1,126 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* error code definitions
*/
#ifndef AVUTIL_ERROR_H
#define AVUTIL_ERROR_H
#include <errno.h>
#include <stddef.h>
/**
* @addtogroup lavu_error
*
* @{
*/
/* error handling */
#if EDOM > 0
#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions.
#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.
#else
/* Some platforms have E* and errno already negated. */
#define AVERROR(e) (e)
#define AVUNERROR(e) (e)
#endif
#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d))
#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found
#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2
#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small
#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found
#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found
#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found
#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file
#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted
#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library
#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found
#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input
#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found
#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found
#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome
#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found
#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found
/**
 * This is semantically identical to AVERROR_BUG;
 * it has been introduced in Libav after our AVERROR_BUG and with a modified value.
*/
#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ')
#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library
#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
#define AVERROR_INPUT_CHANGED (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED)
#define AVERROR_OUTPUT_CHANGED (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED)
/* HTTP & RTSP errors */
#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8,'4','0','0')
#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8,'4','0','1')
#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8,'4','0','3')
#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8,'4','0','4')
#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8,'4','X','X')
#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8,'5','X','X')
#define AV_ERROR_MAX_STRING_SIZE 64
/**
 * Put a description of the AVERROR code errnum in errbuf.
 * In case of failure the global variable errno is set to indicate the
 * error. Even in case of failure av_strerror() will write a generic
 * error message indicating the provided errnum to errbuf.
*
* @param errnum error code to describe
* @param errbuf buffer to which description is written
* @param errbuf_size the size in bytes of errbuf
* @return 0 on success, a negative value if a description for errnum
* cannot be found
*/
int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
/**
* Fill the provided buffer with a string containing an error string
* corresponding to the AVERROR code errnum.
*
* @param errbuf a buffer
* @param errbuf_size size in bytes of errbuf
* @param errnum error code to describe
* @return the buffer in input, filled with the error description
* @see av_strerror()
*/
static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum)
{
av_strerror(errnum, errbuf, errbuf_size);
return errbuf;
}
/**
 * Convenience macro; the return value should be used only directly in
 * function arguments, never stand-alone.
*/
#define av_err2str(errnum) \
av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)
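/**
 * Usage sketch (illustrative addition, not part of the original header):
 * turning an AVERROR code into a readable message with the convenience macro.
 * @code
 * int err = AVERROR(ENOMEM);
 * fprintf(stderr, "operation failed: %s\n", av_err2str(err)); // needs <stdio.h>
 * @endcode
 */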
/**
* @}
*/
#endif /* AVUTIL_ERROR_H */

View File

@ -0,0 +1,113 @@
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* simple arithmetic expression evaluator
*/
#ifndef AVUTIL_EVAL_H
#define AVUTIL_EVAL_H
#include "avutil.h"
typedef struct AVExpr AVExpr;
/**
* Parse and evaluate an expression.
* Note, this is significantly slower than av_expr_eval().
*
 * @param res a pointer to a double where the result value of the
 * expression is put, or NAN in case of error
* @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
* @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
* @param const_values a zero terminated array of values for the identifiers from const_names
* @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
* @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
* @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
* @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
* @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
* @param log_ctx parent logging context
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code otherwise
*/
int av_expr_parse_and_eval(double *res, const char *s,
const char * const *const_names, const double *const_values,
const char * const *func1_names, double (* const *funcs1)(void *, double),
const char * const *func2_names, double (* const *funcs2)(void *, double, double),
void *opaque, int log_offset, void *log_ctx);
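/**
 * Usage sketch (illustrative addition, not part of the original header):
 * one-shot evaluation of a constant expression with no custom constants or
 * functions.
 * @code
 * double val;
 * int ret = av_expr_parse_and_eval(&val, "1+2^3+5*5", NULL, NULL,
 *                                  NULL, NULL, NULL, NULL, NULL, 0, NULL);
 * // on success ret >= 0 and val == 34.0
 * @endcode
 */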
/**
* Parse an expression.
*
 * @param expr a pointer where an AVExpr containing the parsed value is
 * put in case of successful parsing, or NULL otherwise.
* The pointed to AVExpr must be freed with av_expr_free() by the user
* when it is not needed anymore.
* @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
* @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
* @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
* @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
* @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
* @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
* @param log_ctx parent logging context
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code otherwise
*/
int av_expr_parse(AVExpr **expr, const char *s,
const char * const *const_names,
const char * const *func1_names, double (* const *funcs1)(void *, double),
const char * const *func2_names, double (* const *funcs2)(void *, double, double),
int log_offset, void *log_ctx);
/**
* Evaluate a previously parsed expression.
*
* @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names
* @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
* @return the value of the expression
*/
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque);
/**
* Free a parsed expression previously created with av_expr_parse().
*/
void av_expr_free(AVExpr *e);
/**
* Parse the string in numstr and return its value as a double. If
* the string is empty, contains only whitespaces, or does not contain
* an initial substring that has the expected syntax for a
* floating-point number, no conversion is performed. In this case,
* returns a value of zero and the value returned in tail is the value
* of numstr.
*
* @param numstr a string representing a number, may contain one of
* the International System number postfixes, for example 'K', 'M',
* 'G'. If 'i' is appended after the postfix, powers of 2 are used
* instead of powers of 10. The 'B' postfix multiplies the value by
* 8, and can be appended after another postfix or used alone. This
* allows using for example 'KB', 'MiB', 'G' and 'B' as postfix.
* @param tail if non-NULL puts here the pointer to the char next
* after the last parsed character
*/
double av_strtod(const char *numstr, char **tail);
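/**
 * Usage sketch (illustrative addition, not part of the original header):
 * parsing numbers with International System postfixes.
 * @code
 * double a = av_strtod("1.5M", NULL); // 1500000 (powers of 10)
 * double b = av_strtod("1Ki", NULL);  // 1024 (powers of 2 because of the 'i')
 * @endcode
 */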
#endif /* AVUTIL_EVAL_H */

View File

@ -0,0 +1,5 @@
/* Automatically generated by version.sh, do not manually edit! */
#ifndef AVUTIL_FFVERSION_H
#define AVUTIL_FFVERSION_H
#define FFMPEG_VERSION "4.2.1"
#endif /* AVUTIL_FFVERSION_H */

View File

@ -0,0 +1,179 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* a very simple circular buffer FIFO implementation
*/
#ifndef AVUTIL_FIFO_H
#define AVUTIL_FIFO_H
#include <stdint.h>
#include "avutil.h"
#include "attributes.h"
typedef struct AVFifoBuffer {
uint8_t *buffer;
uint8_t *rptr, *wptr, *end;
uint32_t rndx, wndx;
} AVFifoBuffer;
/**
* Initialize an AVFifoBuffer.
 * @param size size of the FIFO in bytes
* @return AVFifoBuffer or NULL in case of memory allocation failure
*/
AVFifoBuffer *av_fifo_alloc(unsigned int size);
/**
* Initialize an AVFifoBuffer.
* @param nmemb number of elements
* @param size size of the single element
* @return AVFifoBuffer or NULL in case of memory allocation failure
*/
AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size);
/**
* Free an AVFifoBuffer.
* @param f AVFifoBuffer to free
*/
void av_fifo_free(AVFifoBuffer *f);
/**
* Free an AVFifoBuffer and reset pointer to NULL.
* @param f AVFifoBuffer to free
*/
void av_fifo_freep(AVFifoBuffer **f);
/**
* Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.
* @param f AVFifoBuffer to reset
*/
void av_fifo_reset(AVFifoBuffer *f);
/**
* Return the amount of data in bytes in the AVFifoBuffer, that is the
* amount of data you can read from it.
* @param f AVFifoBuffer to read from
* @return size
*/
int av_fifo_size(const AVFifoBuffer *f);
/**
* Return the amount of space in bytes in the AVFifoBuffer, that is the
* amount of data you can write into it.
* @param f AVFifoBuffer to write into
* @return size
*/
int av_fifo_space(const AVFifoBuffer *f);
/**
 * Feed data at a specific position from an AVFifoBuffer to a user-supplied callback.
 * Similar to av_fifo_generic_read() but without discarding data.
* @param f AVFifoBuffer to read from
* @param offset offset from current read position
* @param buf_size number of bytes to read
* @param func generic read function
* @param dest data destination
*/
int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int));
/**
* Feed data from an AVFifoBuffer to a user-supplied callback.
 * Similar to av_fifo_generic_read() but without discarding data.
* @param f AVFifoBuffer to read from
* @param buf_size number of bytes to read
* @param func generic read function
* @param dest data destination
*/
int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
/**
* Feed data from an AVFifoBuffer to a user-supplied callback.
* @param f AVFifoBuffer to read from
* @param buf_size number of bytes to read
* @param func generic read function
* @param dest data destination
*/
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
/**
* Feed data from a user-supplied callback to an AVFifoBuffer.
* @param f AVFifoBuffer to write to
* @param src data source; non-const since it may be used as a
* modifiable context by the function defined in func
* @param size number of bytes to write
* @param func generic write function; the first parameter is src,
* the second is dest_buf, the third is dest_buf_size.
* func must return the number of bytes written to dest_buf, or <= 0 to
* indicate no more data available to write.
* If func is NULL, src is interpreted as a simple byte array for source data.
* @return the number of bytes written to the FIFO
*/
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
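/**
 * Usage sketch (illustrative addition, not part of the original header):
 * round-tripping a few bytes through a FIFO using the plain byte-array path
 * (func == NULL).
 * @code
 * AVFifoBuffer *f = av_fifo_alloc(1024);
 * uint8_t in[4] = {1, 2, 3, 4}, out[4];
 * if (f) {
 *     av_fifo_generic_write(f, in, sizeof(in), NULL);  // copy in as raw bytes
 *     av_fifo_generic_read(f, out, sizeof(out), NULL); // copy out and drain
 *     av_fifo_freep(&f);
 * }
 * @endcode
 */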
/**
* Resize an AVFifoBuffer.
* In case of reallocation failure, the old FIFO is kept unchanged.
*
* @param f AVFifoBuffer to resize
* @param size new AVFifoBuffer size in bytes
* @return <0 for failure, >=0 otherwise
*/
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
/**
* Enlarge an AVFifoBuffer.
* In case of reallocation failure, the old FIFO is kept unchanged.
* The new fifo size may be larger than the requested size.
*
* @param f AVFifoBuffer to resize
* @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size()
* @return <0 for failure, >=0 otherwise
*/
int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space);
/**
* Read and discard the specified amount of data from an AVFifoBuffer.
* @param f AVFifoBuffer to read from
* @param size amount of data to read in bytes
*/
void av_fifo_drain(AVFifoBuffer *f, int size);
/**
* Return a pointer to the data stored in a FIFO buffer at a certain offset.
* The FIFO buffer is not modified.
*
* @param f AVFifoBuffer to peek at, f must be non-NULL
 * @param offs an offset in bytes; its absolute value must be less
 *             than the used buffer size or the returned pointer will
 *             point outside the buffer data.
* The used buffer size can be checked with av_fifo_size().
*/
static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
{
uint8_t *ptr = f->rptr + offs;
if (ptr >= f->end)
ptr = f->buffer + (ptr - f->end);
else if (ptr < f->buffer)
ptr = f->end - (f->buffer - ptr);
return ptr;
}
#endif /* AVUTIL_FIFO_H */

View File

@ -0,0 +1,71 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_FILE_H
#define AVUTIL_FILE_H
#include <stdint.h>
#include "avutil.h"
/**
* @file
* Misc file utilities.
*/
/**
* Read the file with name filename, and put its content in a newly
* allocated buffer or map it with mmap() when available.
* In case of success set *bufptr to the read or mmapped buffer, and
* *size to the size in bytes of the buffer in *bufptr.
 * Unlike mmap this function succeeds with zero sized files; in this
 * case *bufptr will be set to NULL and *size will be set to 0.
* The returned buffer must be released with av_file_unmap().
*
* @param log_offset loglevel offset used for logging
* @param log_ctx context used for logging
* @return a non negative number in case of success, a negative value
* corresponding to an AVERROR error code in case of failure
*/
av_warn_unused_result
int av_file_map(const char *filename, uint8_t **bufptr, size_t *size,
int log_offset, void *log_ctx);
/**
* Unmap or free the buffer bufptr created by av_file_map().
*
* @param size size in bytes of bufptr, must be the same as returned
* by av_file_map()
*/
void av_file_unmap(uint8_t *bufptr, size_t size);
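/**
 * Usage sketch (illustrative addition, not part of the original header):
 * mapping a file into memory and releasing it; "path" is a hypothetical
 * filename string.
 * @code
 * uint8_t *buf = NULL;
 * size_t size = 0;
 * if (av_file_map(path, &buf, &size, 0, NULL) >= 0) {
 *     // ... read size bytes from buf ...
 *     av_file_unmap(buf, size);
 * }
 * @endcode
 */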
/**
* Wrapper to work around the lack of mkstemp() on mingw.
* Also, tries to create file in /tmp first, if possible.
* *prefix can be a character constant; *filename will be allocated internally.
* @return file descriptor of opened file (or negative value corresponding to an
* AVERROR code on error)
* and opened file name in **filename.
* @note On very old libcs it is necessary to set a secure umask before
* calling this, av_tempfile() can't call umask itself as it is used in
* libraries and could interfere with the calling application.
 * @deprecated as fd numbers cannot be passed safely between libs on some platforms
*/
int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx);
#endif /* AVUTIL_FILE_H */

View File

@ -0,0 +1,971 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @ingroup lavu_frame
* reference-counted frame API
*/
#ifndef AVUTIL_FRAME_H
#define AVUTIL_FRAME_H
#include <stddef.h>
#include <stdint.h>
#include "avutil.h"
#include "buffer.h"
#include "dict.h"
#include "rational.h"
#include "samplefmt.h"
#include "pixfmt.h"
#include "version.h"
/**
* @defgroup lavu_frame AVFrame
* @ingroup lavu_data
*
* @{
* AVFrame is an abstraction for reference-counted raw multimedia data.
*/
enum AVFrameSideDataType {
/**
* The data is the AVPanScan struct defined in libavcodec.
*/
AV_FRAME_DATA_PANSCAN,
/**
* ATSC A53 Part 4 Closed Captions.
* A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
* The number of bytes of CC data is AVFrameSideData.size.
*/
AV_FRAME_DATA_A53_CC,
/**
* Stereoscopic 3d metadata.
* The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
*/
AV_FRAME_DATA_STEREO3D,
/**
* The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
*/
AV_FRAME_DATA_MATRIXENCODING,
/**
* Metadata relevant to a downmix procedure.
* The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
*/
AV_FRAME_DATA_DOWNMIX_INFO,
/**
* ReplayGain information in the form of the AVReplayGain struct.
*/
AV_FRAME_DATA_REPLAYGAIN,
/**
* This side data contains a 3x3 transformation matrix describing an affine
* transformation that needs to be applied to the frame for correct
* presentation.
*
* See libavutil/display.h for a detailed description of the data.
*/
AV_FRAME_DATA_DISPLAYMATRIX,
/**
* Active Format Description data consisting of a single byte as specified
* in ETSI TS 101 154 using AVActiveFormatDescription enum.
*/
AV_FRAME_DATA_AFD,
/**
* Motion vectors exported by some codecs (on demand through the export_mvs
* flag set in the libavcodec AVCodecContext flags2 option).
* The data is the AVMotionVector struct defined in
* libavutil/motion_vector.h.
*/
AV_FRAME_DATA_MOTION_VECTORS,
/**
 * Recommends skipping the specified number of samples. This is exported
* only if the "skip_manual" AVOption is set in libavcodec.
* This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
* @code
* u32le number of samples to skip from start of this packet
* u32le number of samples to skip from end of this packet
* u8 reason for start skip
* u8 reason for end skip (0=padding silence, 1=convergence)
* @endcode
*/
AV_FRAME_DATA_SKIP_SAMPLES,
/**
* This side data must be associated with an audio frame and corresponds to
* enum AVAudioServiceType defined in avcodec.h.
*/
AV_FRAME_DATA_AUDIO_SERVICE_TYPE,
/**
* Mastering display metadata associated with a video frame. The payload is
* an AVMasteringDisplayMetadata type and contains information about the
* mastering display color volume.
*/
AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
/**
* The GOP timecode in 25 bit timecode format. Data format is 64-bit integer.
* This is set on the first frame of a GOP that has a temporal reference of 0.
*/
AV_FRAME_DATA_GOP_TIMECODE,
/**
* The data represents the AVSphericalMapping structure defined in
* libavutil/spherical.h.
*/
AV_FRAME_DATA_SPHERICAL,
/**
* Content light level (based on CTA-861.3). This payload contains data in
* the form of the AVContentLightMetadata struct.
*/
AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
/**
* The data contains an ICC profile as an opaque octet buffer following the
* format described by ISO 15076-1 with an optional name defined in the
* metadata key entry "name".
*/
AV_FRAME_DATA_ICC_PROFILE,
#if FF_API_FRAME_QP
/**
* Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA.
* The contents of this side data are undocumented and internal; use
* av_frame_set_qp_table() and av_frame_get_qp_table() to access this in a
* meaningful way instead.
*/
AV_FRAME_DATA_QP_TABLE_PROPERTIES,
/**
* Raw QP table data. Its format is described by
* AV_FRAME_DATA_QP_TABLE_PROPERTIES. Use av_frame_set_qp_table() and
* av_frame_get_qp_table() to access this instead.
*/
AV_FRAME_DATA_QP_TABLE_DATA,
#endif
/**
* Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t
* where the first uint32_t describes how many (1-3) of the other timecodes are used.
* The timecode format is described in the av_timecode_get_smpte_from_framenum()
* function in libavutil/timecode.c.
*/
AV_FRAME_DATA_S12M_TIMECODE,
/**
* HDR dynamic metadata associated with a video frame. The payload is
* an AVDynamicHDRPlus type and contains information for color
* volume transform - application 4 of SMPTE 2094-40:2016 standard.
*/
AV_FRAME_DATA_DYNAMIC_HDR_PLUS,
/**
 * Regions Of Interest: the data is an array of AVRegionOfInterest type, and the number of
 * array elements is implied by AVFrameSideData.size / AVRegionOfInterest.self_size.
*/
AV_FRAME_DATA_REGIONS_OF_INTEREST,
};
enum AVActiveFormatDescription {
AV_AFD_SAME = 8,
AV_AFD_4_3 = 9,
AV_AFD_16_9 = 10,
AV_AFD_14_9 = 11,
AV_AFD_4_3_SP_14_9 = 13,
AV_AFD_16_9_SP_14_9 = 14,
AV_AFD_SP_4_3 = 15,
};
/**
* Structure to hold side data for an AVFrame.
*
* sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added
* to the end with a minor bump.
*/
typedef struct AVFrameSideData {
enum AVFrameSideDataType type;
uint8_t *data;
int size;
AVDictionary *metadata;
AVBufferRef *buf;
} AVFrameSideData;
/**
* Structure describing a single Region Of Interest.
*
* When multiple regions are defined in a single side-data block, they
* should be ordered from most to least important - some encoders are only
* capable of supporting a limited number of distinct regions, so will have
* to truncate the list.
*
* When overlapping regions are defined, the first region containing a given
* area of the frame applies.
*/
typedef struct AVRegionOfInterest {
/**
* Must be set to the size of this data structure (that is,
* sizeof(AVRegionOfInterest)).
*/
uint32_t self_size;
/**
* Distance in pixels from the top edge of the frame to the top and
* bottom edges and from the left edge of the frame to the left and
* right edges of the rectangle defining this region of interest.
*
* The constraints on a region are encoder dependent, so the region
* actually affected may be slightly larger for alignment or other
* reasons.
*/
int top;
int bottom;
int left;
int right;
/**
* Quantisation offset.
*
* Must be in the range -1 to +1. A value of zero indicates no quality
* change. A negative value asks for better quality (less quantisation),
* while a positive value asks for worse quality (greater quantisation).
*
* The range is calibrated so that the extreme values indicate the
* largest possible offset - if the rest of the frame is encoded with the
* worst possible quality, an offset of -1 indicates that this region
* should be encoded with the best possible quality anyway. Intermediate
* values are then interpolated in some codec-dependent way.
*
* For example, in 10-bit H.264 the quantisation parameter varies between
* -12 and 51. A typical qoffset value of -1/10 therefore indicates that
* this region should be encoded with a QP around one-tenth of the full
* range better than the rest of the frame. So, if most of the frame
* were to be encoded with a QP of around 30, this region would get a QP
* of around 24 (an offset of approximately -1/10 * (51 - -12) = -6.3).
* An extreme value of -1 would indicate that this region should be
* encoded with the best possible quality regardless of the treatment of
* the rest of the frame - that is, should be encoded at a QP of -12.
*/
AVRational qoffset;
} AVRegionOfInterest;
/**
* This structure describes decoded (raw) audio or video data.
*
* AVFrame must be allocated using av_frame_alloc(). Note that this only
* allocates the AVFrame itself, the buffers for the data must be managed
* through other means (see below).
* AVFrame must be freed with av_frame_free().
*
* AVFrame is typically allocated once and then reused multiple times to hold
* different data (e.g. a single AVFrame to hold frames received from a
* decoder). In such a case, av_frame_unref() will free any references held by
* the frame and reset it to its original clean state before it
* is reused again.
*
* The data described by an AVFrame is usually reference counted through the
* AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
* AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
* least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
* every single data plane must be contained in one of the buffers in
* AVFrame.buf or AVFrame.extended_buf.
* There may be a single buffer for all the data, or one separate buffer for
* each plane, or anything in between.
*
* sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
* to the end with a minor bump.
*
 * Fields can be accessed through AVOptions; the name string used matches the
 * C structure field name for fields accessible through AVOptions. The AVClass
 * for AVFrame can be obtained from avcodec_get_frame_class().
*/
typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
/**
* pointer to the picture/channel planes.
* This might be different from the first allocated byte
*
 * Some decoders access areas outside 0,0 - width,height; please
 * see avcodec_align_dimensions2(). Some filters and swscale can read
 * up to 16 bytes beyond the planes; if these filters are to be used,
 * then 16 extra bytes must be allocated.
*
* NOTE: Except for hwaccel formats, pointers not needed by the format
* MUST be set to NULL.
*/
uint8_t *data[AV_NUM_DATA_POINTERS];
/**
* For video, size in bytes of each picture line.
* For audio, size in bytes of each plane.
*
* For audio, only linesize[0] may be set. For planar audio, each channel
* plane must be the same size.
*
 * For video the linesizes should be multiples of the CPU's alignment
 * preference, which is 16 or 32 for modern desktop CPUs.
 * Some code requires such alignment, other code can be slower without
 * correct alignment, and for yet other code it makes no difference.
*
* @note The linesize may be larger than the size of usable data -- there
* may be extra padding present for performance reasons.
*/
int linesize[AV_NUM_DATA_POINTERS];
/**
* pointers to the data planes/channels.
*
* For video, this should simply point to data[].
*
* For planar audio, each channel has a separate data pointer, and
* linesize[0] contains the size of each channel buffer.
* For packed audio, there is just one data pointer, and linesize[0]
* contains the total size of the buffer for all channels.
*
* Note: Both data and extended_data should always be set in a valid frame,
 * but for planar audio with more channels than can fit in data,
* extended_data must be used in order to access all channels.
*/
uint8_t **extended_data;
/**
* @name Video dimensions
* Video frames only. The coded dimensions (in pixels) of the video frame,
* i.e. the size of the rectangle that contains some well-defined values.
*
* @note The part of the frame intended for display/presentation is further
* restricted by the @ref cropping "Cropping rectangle".
* @{
*/
int width, height;
/**
* @}
*/
/**
* number of audio samples (per channel) described by this frame
*/
int nb_samples;
/**
 * format of the frame, -1 if unknown or unset.
 * Values correspond to enum AVPixelFormat for video frames,
 * enum AVSampleFormat for audio frames.
*/
int format;
/**
* 1 -> keyframe, 0-> not
*/
int key_frame;
/**
* Picture type of the frame.
*/
enum AVPictureType pict_type;
/**
* Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
*/
AVRational sample_aspect_ratio;
/**
* Presentation timestamp in time_base units (time when frame should be shown to user).
*/
int64_t pts;
#if FF_API_PKT_PTS
/**
* PTS copied from the AVPacket that was decoded to produce this frame.
* @deprecated use the pts field instead
*/
attribute_deprecated
int64_t pkt_pts;
#endif
/**
 * DTS copied from the AVPacket that triggered returning this frame (if frame threading isn't used).
 * This is also the presentation time of this AVFrame, calculated from
 * only AVPacket.dts values without pts values.
*/
int64_t pkt_dts;
/**
* picture number in bitstream order
*/
int coded_picture_number;
/**
* picture number in display order
*/
int display_picture_number;
/**
* quality (between 1 (good) and FF_LAMBDA_MAX (bad))
*/
int quality;
/**
* for some private data of the user
*/
void *opaque;
#if FF_API_ERROR_FRAME
/**
* @deprecated unused
*/
attribute_deprecated
uint64_t error[AV_NUM_DATA_POINTERS];
#endif
/**
* When decoding, this signals how much the picture must be delayed.
* extra_delay = repeat_pict / (2*fps)
*/
int repeat_pict;
/**
* The content of the picture is interlaced.
*/
int interlaced_frame;
/**
* If the content is interlaced, is top field displayed first.
*/
int top_field_first;
/**
* Tell user application that palette has changed from previous frame.
*/
int palette_has_changed;
/**
* reordered opaque 64 bits (generally an integer or a double precision float
* PTS but can be anything).
* The user sets AVCodecContext.reordered_opaque to represent the input at
* that time,
* the decoder reorders values as needed and sets AVFrame.reordered_opaque
* to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
*/
int64_t reordered_opaque;
/**
* Sample rate of the audio data.
*/
int sample_rate;
/**
* Channel layout of the audio data.
*/
uint64_t channel_layout;
/**
* AVBuffer references backing the data for this frame. If all elements of
* this array are NULL, then this frame is not reference counted. This array
* must be filled contiguously -- if buf[i] is non-NULL then buf[j] must
* also be non-NULL for all j < i.
*
* There may be at most one AVBuffer per data plane, so for video this array
* always contains all the references. For planar audio with more than
* AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
* this array. Then the extra AVBufferRef pointers are stored in the
* extended_buf array.
*/
AVBufferRef *buf[AV_NUM_DATA_POINTERS];
/**
* For planar audio which requires more than AV_NUM_DATA_POINTERS
* AVBufferRef pointers, this array will hold all the references which
* cannot fit into AVFrame.buf.
*
* Note that this is different from AVFrame.extended_data, which always
* contains all the pointers. This array only contains the extra pointers,
* which cannot fit into AVFrame.buf.
*
* This array is always allocated using av_malloc() by whoever constructs
* the frame. It is freed in av_frame_unref().
*/
AVBufferRef **extended_buf;
/**
* Number of elements in extended_buf.
*/
int nb_extended_buf;
AVFrameSideData **side_data;
int nb_side_data;
/**
* @defgroup lavu_frame_flags AV_FRAME_FLAGS
* @ingroup lavu_frame
* Flags describing additional frame properties.
*
* @{
*/
/**
* The frame data may be corrupted, e.g. due to decoding errors.
*/
#define AV_FRAME_FLAG_CORRUPT (1 << 0)
/**
* A flag to mark frames that need to be decoded but should not be output.
*/
#define AV_FRAME_FLAG_DISCARD (1 << 2)
/**
* @}
*/
/**
* Frame flags, a combination of @ref lavu_frame_flags
*/
int flags;
/**
* MPEG vs JPEG YUV range.
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
enum AVColorRange color_range;
enum AVColorPrimaries color_primaries;
enum AVColorTransferCharacteristic color_trc;
/**
* YUV colorspace type.
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
enum AVColorSpace colorspace;
enum AVChromaLocation chroma_location;
/**
* frame timestamp estimated using various heuristics, in stream time base
* - encoding: unused
* - decoding: set by libavcodec, read by user.
*/
int64_t best_effort_timestamp;
/**
* reordered pos from the last AVPacket that has been input into the decoder
* - encoding: unused
* - decoding: Read by user.
*/
int64_t pkt_pos;
/**
* duration of the corresponding packet, expressed in
* AVStream->time_base units, 0 if unknown.
* - encoding: unused
* - decoding: Read by user.
*/
int64_t pkt_duration;
/**
* metadata.
* - encoding: Set by user.
* - decoding: Set by libavcodec.
*/
AVDictionary *metadata;
/**
* decode error flags of the frame, set to a combination of
* FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
* were errors during the decoding.
* - encoding: unused
* - decoding: set by libavcodec, read by user.
*/
int decode_error_flags;
#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
#define FF_DECODE_ERROR_MISSING_REFERENCE 2
#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4
#define FF_DECODE_ERROR_DECODE_SLICES 8
/**
* number of audio channels, only used for audio.
* - encoding: unused
* - decoding: Read by user.
*/
int channels;
/**
* size of the corresponding packet containing the compressed
* frame.
* It is set to a negative value if unknown.
* - encoding: unused
* - decoding: set by libavcodec, read by user.
*/
int pkt_size;
#if FF_API_FRAME_QP
/**
* QP table
*/
attribute_deprecated
int8_t *qscale_table;
/**
* QP store stride
*/
attribute_deprecated
int qstride;
attribute_deprecated
int qscale_type;
attribute_deprecated
AVBufferRef *qp_table_buf;
#endif
/**
* For hwaccel-format frames, this should be a reference to the
* AVHWFramesContext describing the frame.
*/
AVBufferRef *hw_frames_ctx;
/**
* AVBufferRef for free use by the API user. FFmpeg will never check the
* contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
* the frame is unreferenced. av_frame_copy_props() calls create a new
* reference with av_buffer_ref() for the target frame's opaque_ref field.
*
* This is unrelated to the opaque field, although it serves a similar
* purpose.
*/
AVBufferRef *opaque_ref;
/**
* @anchor cropping
* @name Cropping
* Video frames only. The number of pixels to discard from the
* top/bottom/left/right border of the frame to obtain the sub-rectangle of
* the frame intended for presentation.
* @{
*/
size_t crop_top;
size_t crop_bottom;
size_t crop_left;
size_t crop_right;
/**
* @}
*/
/**
* AVBufferRef for internal use by a single libav* library.
* Must not be used to transfer data between libraries.
* Has to be NULL when ownership of the frame leaves the respective library.
*
* Code outside the FFmpeg libs should never check or change the contents of the buffer ref.
*
* FFmpeg calls av_buffer_unref() on it when the frame is unreferenced.
* av_frame_copy_props() calls create a new reference with av_buffer_ref()
* for the target frame's private_ref field.
*/
AVBufferRef *private_ref;
} AVFrame;
#if FF_API_FRAME_GET_SET
/**
* Accessors for some AVFrame fields. These used to be provided for ABI
* compatibility, and do not need to be used anymore.
*/
attribute_deprecated
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);
attribute_deprecated
void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val);
attribute_deprecated
int64_t av_frame_get_pkt_duration (const AVFrame *frame);
attribute_deprecated
void av_frame_set_pkt_duration (AVFrame *frame, int64_t val);
attribute_deprecated
int64_t av_frame_get_pkt_pos (const AVFrame *frame);
attribute_deprecated
void av_frame_set_pkt_pos (AVFrame *frame, int64_t val);
attribute_deprecated
int64_t av_frame_get_channel_layout (const AVFrame *frame);
attribute_deprecated
void av_frame_set_channel_layout (AVFrame *frame, int64_t val);
attribute_deprecated
int av_frame_get_channels (const AVFrame *frame);
attribute_deprecated
void av_frame_set_channels (AVFrame *frame, int val);
attribute_deprecated
int av_frame_get_sample_rate (const AVFrame *frame);
attribute_deprecated
void av_frame_set_sample_rate (AVFrame *frame, int val);
attribute_deprecated
AVDictionary *av_frame_get_metadata (const AVFrame *frame);
attribute_deprecated
void av_frame_set_metadata (AVFrame *frame, AVDictionary *val);
attribute_deprecated
int av_frame_get_decode_error_flags (const AVFrame *frame);
attribute_deprecated
void av_frame_set_decode_error_flags (AVFrame *frame, int val);
attribute_deprecated
int av_frame_get_pkt_size(const AVFrame *frame);
attribute_deprecated
void av_frame_set_pkt_size(AVFrame *frame, int val);
#if FF_API_FRAME_QP
attribute_deprecated
int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type);
attribute_deprecated
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type);
#endif
attribute_deprecated
enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame);
attribute_deprecated
void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val);
attribute_deprecated
enum AVColorRange av_frame_get_color_range(const AVFrame *frame);
attribute_deprecated
void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val);
#endif
/**
* Get the name of a colorspace.
* @return a static string identifying the colorspace; can be NULL.
*/
const char *av_get_colorspace_name(enum AVColorSpace val);
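/*
 * Minimal usage sketch (illustrative only; assumes a decoded AVFrame named
 * "frame" is available and that the NULL return case is handled as shown):
 *
 * @code
 * const char *csp_name = av_get_colorspace_name(frame->colorspace);
 * printf("colorspace: %s\n", csp_name ? csp_name : "unknown");
 * @endcode
 */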
/**
* Allocate an AVFrame and set its fields to default values. The resulting
* struct must be freed using av_frame_free().
*
* @return An AVFrame filled with default values or NULL on failure.
*
* @note this only allocates the AVFrame itself, not the data buffers. Those
* must be allocated through other means, e.g. with av_frame_get_buffer() or
* manually.
*/
AVFrame *av_frame_alloc(void);
/**
* Free the frame and any dynamically allocated objects in it,
* e.g. extended_data. If the frame is reference counted, it will be
* unreferenced first.
*
* @param frame frame to be freed. The pointer will be set to NULL.
*/
void av_frame_free(AVFrame **frame);
/**
* Set up a new reference to the data described by the source frame.
*
* Copy frame properties from src to dst and create a new reference for each
* AVBufferRef from src.
*
* If src is not reference counted, new buffers are allocated and the data is
* copied.
*
* @warning: dst MUST have been either unreferenced with av_frame_unref(dst),
* or newly allocated with av_frame_alloc() before calling this
* function, or undefined behavior will occur.
*
* @return 0 on success, a negative AVERROR on error
*/
int av_frame_ref(AVFrame *dst, const AVFrame *src);
/**
* Create a new frame that references the same data as src.
*
* This is a shortcut for av_frame_alloc()+av_frame_ref().
*
* @return newly created AVFrame on success, NULL on error.
*/
AVFrame *av_frame_clone(const AVFrame *src);
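/*
 * Illustrative sketch of reference-counted sharing with av_frame_clone();
 * the src/copy names are arbitrary and error handling is abbreviated:
 *
 * @code
 * AVFrame *copy = av_frame_clone(src);   // new AVFrame sharing src's buffers
 * if (!copy)
 *     return AVERROR(ENOMEM);
 * // ... use copy independently of src ...
 * av_frame_free(&copy);                  // drops only this reference
 * @endcode
 */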
/**
* Unreference all the buffers referenced by frame and reset the frame fields.
*/
void av_frame_unref(AVFrame *frame);
/**
* Move everything contained in src to dst and reset src.
*
* @warning: dst is not unreferenced, but directly overwritten without reading
* or deallocating its contents. Call av_frame_unref(dst) manually
* before calling this function to ensure that no memory is leaked.
*/
void av_frame_move_ref(AVFrame *dst, AVFrame *src);
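/*
 * Sketch of the typical move pattern, e.g. handing a freshly received frame
 * to a longer-lived holder; "holder" and "tmp" are hypothetical names:
 *
 * @code
 * av_frame_unref(holder);          // ensure the destination holds no references
 * av_frame_move_ref(holder, tmp);  // holder now owns the data, tmp is reset
 * @endcode
 */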
/**
* Allocate new buffer(s) for audio or video data.
*
* The following fields must be set on frame before calling this function:
* - format (pixel format for video, sample format for audio)
* - width and height for video
* - nb_samples and channel_layout for audio
*
* This function will fill AVFrame.data and AVFrame.buf arrays and, if
* necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
* For planar formats, one buffer will be allocated for each plane.
*
* @warning: if frame has already been allocated, calling this function will
* leak memory. In addition, undefined behavior can occur in certain
* cases.
*
* @param frame frame in which to store the new buffers.
* @param align Required buffer size alignment. If equal to 0, alignment will be
* chosen automatically for the current CPU. It is highly
* recommended to pass 0 here unless you know what you are doing.
*
* @return 0 on success, a negative AVERROR on error.
*/
int av_frame_get_buffer(AVFrame *frame, int align);
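/*
 * A minimal allocation sketch for a video frame, following the field
 * requirements listed above; the 1280x720 / AV_PIX_FMT_YUV420P values are
 * arbitrary assumptions:
 *
 * @code
 * AVFrame *frame = av_frame_alloc();
 * if (!frame)
 *     return AVERROR(ENOMEM);
 * frame->format = AV_PIX_FMT_YUV420P;
 * frame->width  = 1280;
 * frame->height = 720;
 * int ret = av_frame_get_buffer(frame, 0);   // 0: pick alignment for this CPU
 * if (ret < 0)
 *     av_frame_free(&frame);
 * @endcode
 */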
/**
* Check if the frame data is writable.
*
* @return A positive value if the frame data is writable (which is true if and
* only if each of the underlying buffers has only one reference, namely the one
* stored in this frame). Return 0 otherwise.
*
* If 1 is returned the answer is valid until av_buffer_ref() is called on any
* of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
*
* @see av_frame_make_writable(), av_buffer_is_writable()
*/
int av_frame_is_writable(AVFrame *frame);
/**
* Ensure that the frame data is writable, avoiding data copy if possible.
*
* Do nothing if the frame is writable, allocate new buffers and copy the data
* if it is not.
*
* @return 0 on success, a negative AVERROR on error.
*
* @see av_frame_is_writable(), av_buffer_is_writable(),
* av_buffer_make_writable()
*/
int av_frame_make_writable(AVFrame *frame);
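/*
 * Sketch of in-place modification guarded by av_frame_make_writable();
 * assumes "frame" is a planar video frame held by the caller. Writing to a
 * shared frame without this call would modify every reference:
 *
 * @code
 * int ret = av_frame_make_writable(frame);   // copies data only if shared
 * if (ret < 0)
 *     return ret;
 * frame->data[0][0] = 0;                     // e.g. touch the first luma byte
 * @endcode
 */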
/**
* Copy the frame data from src to dst.
*
* This function does not allocate anything, dst must be already initialized and
* allocated with the same parameters as src.
*
* This function only copies the frame data (i.e. the contents of the data /
* extended data arrays), not any other properties.
*
* @return >= 0 on success, a negative AVERROR on error.
*/
int av_frame_copy(AVFrame *dst, const AVFrame *src);
/**
* Copy only "metadata" fields from src to dst.
*
* Metadata for the purpose of this function are those fields that do not affect
* the data layout in the buffers. E.g. pts, sample rate (for audio) or sample
* aspect ratio (for video), but not width/height or channel layout.
* Side data is also copied.
*/
int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
/**
* Get the buffer reference a given data plane is stored in.
*
* @param plane index of the data plane of interest in frame->extended_data.
*
* @return the buffer reference that contains the plane or NULL if the input
* frame is not valid.
*/
AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
/**
* Add a new side data to a frame.
*
* @param frame a frame to which the side data should be added
* @param type type of the added side data
* @param size size of the side data
*
* @return newly added side data on success, NULL on error
*/
AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
enum AVFrameSideDataType type,
int size);
/**
* Add a new side data to a frame from an existing AVBufferRef
*
* @param frame a frame to which the side data should be added
* @param type the type of the added side data
* @param buf an AVBufferRef to add as side data. The ownership of
* the reference is transferred to the frame.
*
* @return newly added side data on success, NULL on error. On failure
* the frame is unchanged and the AVBufferRef remains owned by
* the caller.
*/
AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
enum AVFrameSideDataType type,
AVBufferRef *buf);
/**
* @return a pointer to the side data of a given type on success, NULL if there
* is no side data with such type in this frame.
*/
AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
enum AVFrameSideDataType type);
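/*
 * Sketch of attaching and reading back side data on a caller-owned "frame";
 * AV_FRAME_DATA_A53_CC and the 16-byte payload are placeholders for whatever
 * type and data the application actually needs:
 *
 * @code
 * AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_A53_CC, 16);
 * if (sd)
 *     memset(sd->data, 0, sd->size);   // fill in the real payload here
 *
 * AVFrameSideData *found = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
 * if (found) {
 *     // process found->data / found->size
 * }
 * @endcode
 */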
/**
* If side data of the supplied type exists in the frame, free it and remove it
* from the frame.
*/
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
/**
* Flags for frame cropping.
*/
enum {
/**
* Apply the maximum possible cropping, even if it requires setting the
* AVFrame.data[] entries to unaligned pointers. Passing unaligned data
* to FFmpeg API is generally not allowed, and causes undefined behavior
* (such as crashes). You can pass unaligned data only to FFmpeg APIs that
* are explicitly documented to accept it. Use this flag only if you
* absolutely know what you are doing.
*/
AV_FRAME_CROP_UNALIGNED = 1 << 0,
};
/**
* Crop the given video AVFrame according to its crop_left/crop_top/crop_right/
* crop_bottom fields. If cropping is successful, the function will adjust the
* data pointers and the width/height fields, and set the crop fields to 0.
*
* In all cases, the cropping boundaries will be rounded to the inherent
* alignment of the pixel format. In some cases, such as for opaque hwaccel
* formats, the left/top cropping is ignored. The crop fields are set to 0 even
* if the cropping was rounded or ignored.
*
* @param frame the frame which should be cropped
* @param flags Some combination of AV_FRAME_CROP_* flags, or 0.
*
* @return >= 0 on success, a negative AVERROR on error. If the cropping fields
* were invalid, AVERROR(ERANGE) is returned, and nothing is changed.
*/
int av_frame_apply_cropping(AVFrame *frame, int flags);
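/*
 * Decode-side cropping sketch: after a decoder fills the crop_* fields of
 * "frame", the presentation-sized picture can be obtained as follows (error
 * handling kept minimal):
 *
 * @code
 * // frame was just returned by a decoder and may carry crop_* values
 * int ret = av_frame_apply_cropping(frame, 0);   // 0: keep pointers aligned
 * if (ret < 0)
 *     return ret;
 * // frame->width / frame->height now describe the cropped picture
 * @endcode
 */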
/**
* @return a string identifying the side data type
*/
const char *av_frame_side_data_name(enum AVFrameSideDataType type);
/**
* @}
*/
#endif /* AVUTIL_FRAME_H */

View File

@ -0,0 +1,269 @@
/*
* Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @ingroup lavu_hash_generic
* Generic hashing API
*/
#ifndef AVUTIL_HASH_H
#define AVUTIL_HASH_H
#include <stdint.h>
#include "version.h"
/**
* @defgroup lavu_hash Hash Functions
* @ingroup lavu_crypto
* Hash functions useful in multimedia.
*
* Hash functions are widely used in multimedia, from error checking and
* concealment to internal regression testing. libavutil has efficient
* implementations of a variety of hash functions that may be useful for
* FFmpeg and other multimedia applications.
*
* @{
*
* @defgroup lavu_hash_generic Generic Hashing API
* An abstraction layer for all hash functions supported by libavutil.
*
* If your application needs to support a wide range of different hash
* functions, then the Generic Hashing API is for you. It provides a generic,
* reusable API for @ref lavu_hash "all hash functions" implemented in libavutil.
* If you just need to use one particular hash function, use the @ref lavu_hash
* "individual hash" directly.
*
* @section Sample Code
*
* A basic template for using the Generic Hashing API follows:
*
* @code
* struct AVHashContext *ctx = NULL;
* const char *hash_name = NULL;
* uint8_t *output_buf = NULL;
* int ret;
*
* // Select from a string returned by av_hash_names()
* hash_name = ...;
*
* // Allocate a hash context
* ret = av_hash_alloc(&ctx, hash_name);
* if (ret < 0)
* return ret;
*
* // Initialize the hash context
* av_hash_init(ctx);
*
* // Update the hash context with data
* while (data_left) {
* av_hash_update(ctx, data, size);
* }
*
* // Now we have no more data, so it is time to finalize the hash and get the
* // output. But we need to first allocate an output buffer. Note that you can
* // use any memory allocation function, including malloc(), not just
* // av_malloc().
* output_buf = av_malloc(av_hash_get_size(ctx));
* if (!output_buf)
* return AVERROR(ENOMEM);
*
* // Finalize the hash context.
* // You can use any of the av_hash_final*() functions provided, for other
* // output formats. If you do so, be sure to adjust the memory allocation
* // above. See the function documentation below for the exact amount of extra
* // memory needed.
* av_hash_final(ctx, output_buf);
*
* // Free the context
* av_hash_freep(&ctx);
* @endcode
*
* @section Hash Function-Specific Information
* If the CRC32 hash is selected, the #AV_CRC_32_IEEE polynomial will be
* used.
*
* If the Murmur3 hash is selected, the default seed will be used. See @ref
* lavu_murmur3_seedinfo "Murmur3" for more information.
*
* @{
*/
/**
* @example ffhash.c
* This example is a simple command line application that takes one or more
* arguments. It demonstrates a typical use of the hashing API with allocation,
* initialization, updating, and finalizing.
*/
struct AVHashContext;
/**
* Allocate a hash context for the algorithm specified by name.
*
* @return >= 0 for success, a negative error code for failure
*
* @note The context is not initialized after a call to this function; you must
* call av_hash_init() to do so.
*/
int av_hash_alloc(struct AVHashContext **ctx, const char *name);
/**
* Get the names of available hash algorithms.
*
* This function can be used to enumerate the algorithms.
*
* @param[in] i Index of the hash algorithm, starting from 0
* @return Pointer to a static string or `NULL` if `i` is out of range
*/
const char *av_hash_names(int i);
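/*
 * Enumeration sketch: listing every hash algorithm compiled into libavutil by
 * walking the index until NULL is returned:
 *
 * @code
 * for (int i = 0; av_hash_names(i); i++)
 *     printf("%s\n", av_hash_names(i));
 * @endcode
 */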
/**
* Get the name of the algorithm corresponding to the given hash context.
*/
const char *av_hash_get_name(const struct AVHashContext *ctx);
/**
* Maximum value that av_hash_get_size() will currently return.
*
* You can use this if you absolutely want or need to use static allocation for
* the output buffer and are fine with not supporting hashes newly added to
* libavutil without recompilation.
*
* @warning
* Adding new hashes with larger sizes, and increasing the macro while doing
* so, will not be considered an ABI change. To prevent your code from
* overflowing a buffer, either dynamically allocate the output buffer with
* av_hash_get_size(), or limit your use of the Hashing API to hashes that are
* already in FFmpeg during the time of compilation.
*/
#define AV_HASH_MAX_SIZE 64
/**
* Get the size of the resulting hash value in bytes.
*
* The maximum value this function will currently return is available as macro
* #AV_HASH_MAX_SIZE.
*
* @param[in] ctx Hash context
* @return Size of the hash value in bytes
*/
int av_hash_get_size(const struct AVHashContext *ctx);
/**
* Initialize or reset a hash context.
*
* @param[in,out] ctx Hash context
*/
void av_hash_init(struct AVHashContext *ctx);
/**
* Update a hash context with additional data.
*
* @param[in,out] ctx Hash context
* @param[in] src Data to be added to the hash context
* @param[in] len Size of the additional data
*/
#if FF_API_CRYPTO_SIZE_T
void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len);
#else
void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, size_t len);
#endif
/**
* Finalize a hash context and compute the actual hash value.
*
* The minimum size of `dst` buffer is given by av_hash_get_size() or
* #AV_HASH_MAX_SIZE. The use of the latter macro is discouraged.
*
* It is not safe to update or finalize a hash context again, if it has already
* been finalized.
*
* @param[in,out] ctx Hash context
* @param[out] dst Where the final hash value will be stored
*
* @see av_hash_final_bin() provides an alternative API
*/
void av_hash_final(struct AVHashContext *ctx, uint8_t *dst);
/**
* Finalize a hash context and store the actual hash value in a buffer.
*
* It is not safe to update or finalize a hash context again, if it has already
* been finalized.
*
* If `size` is smaller than the hash size (given by av_hash_get_size()), the
* hash is truncated; if size is larger, the buffer is padded with 0.
*
* @param[in,out] ctx Hash context
* @param[out] dst Where the final hash value will be stored
* @param[in] size Number of bytes to write to `dst`
*/
void av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size);
/**
* Finalize a hash context and store the hexadecimal representation of the
* actual hash value as a string.
*
* It is not safe to update or finalize a hash context again, if it has already
* been finalized.
*
* The string is always 0-terminated.
*
* If `size` is smaller than `2 * hash_size + 1`, where `hash_size` is the
* value returned by av_hash_get_size(), the string will be truncated.
*
* @param[in,out] ctx Hash context
* @param[out] dst Where the string will be stored
* @param[in] size Maximum number of bytes to write to `dst`
*/
void av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size);
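/*
 * A compact sketch of producing a hexadecimal digest of a memory buffer; the
 * "MD5" name is one of the algorithms reported by av_hash_names(), buf and
 * buf_size stand for the caller's data, and the output buffer is sized for
 * the worst case using AV_HASH_MAX_SIZE:
 *
 * @code
 * struct AVHashContext *ctx = NULL;
 * uint8_t hex[2 * AV_HASH_MAX_SIZE + 1];
 * if (av_hash_alloc(&ctx, "MD5") < 0)
 *     return -1;
 * av_hash_init(ctx);
 * av_hash_update(ctx, buf, buf_size);        // buf/buf_size: caller's data
 * av_hash_final_hex(ctx, hex, sizeof(hex));
 * av_hash_freep(&ctx);
 * printf("%s\n", (const char *)hex);
 * @endcode
 */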
/**
* Finalize a hash context and store the Base64 representation of the
* actual hash value as a string.
*
* It is not safe to update or finalize a hash context again, if it has already
* been finalized.
*
* The string is always 0-terminated.
*
* If `size` is smaller than AV_BASE64_SIZE(hash_size), where `hash_size` is
* the value returned by av_hash_get_size(), the string will be truncated.
*
* @param[in,out] ctx Hash context
* @param[out] dst Where the final hash value will be stored
* @param[in] size Maximum number of bytes to write to `dst`
*/
void av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size);
/**
* Free hash context and set hash context pointer to `NULL`.
*
* @param[in,out] ctx Pointer to hash context
*/
void av_hash_freep(struct AVHashContext **ctx);
/**
* @}
* @}
*/
#endif /* AVUTIL_HASH_H */

View File

@ -0,0 +1,343 @@
/*
* Copyright (c) 2018 Mohammad Izadi <moh.izadi at gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_HDR_DYNAMIC_METADATA_H
#define AVUTIL_HDR_DYNAMIC_METADATA_H
#include "frame.h"
#include "rational.h"
/**
* Option for overlapping elliptical pixel selectors in an image.
*/
enum AVHDRPlusOverlapProcessOption {
AV_HDR_PLUS_OVERLAP_PROCESS_WEIGHTED_AVERAGING = 0,
AV_HDR_PLUS_OVERLAP_PROCESS_LAYERING = 1,
};
/**
* Represents the percentile at a specific percentage in
* a distribution.
*/
typedef struct AVHDRPlusPercentile {
/**
* The percentage value corresponding to a specific percentile linearized
* RGB value in the processing window in the scene. The value shall be in
* the range of 0 to 100, inclusive.
*/
uint8_t percentage;
/**
* The linearized maxRGB value at a specific percentile in the processing
* window in the scene. The value shall be in the range of 0 to 1, inclusive
* and in multiples of 0.00001.
*/
AVRational percentile;
} AVHDRPlusPercentile;
/**
* Color transform parameters at a processing window in a dynamic metadata for
* SMPTE 2094-40.
*/
typedef struct AVHDRPlusColorTransformParams {
/**
* The relative x coordinate of the top left pixel of the processing
* window. The value shall be in the range of 0 to 1, inclusive and
* in multiples of 1/(width of Picture - 1). The value 1 corresponds
* to the absolute coordinate of width of Picture - 1. The value for
* first processing window shall be 0.
*/
AVRational window_upper_left_corner_x;
/**
* The relative y coordinate of the top left pixel of the processing
* window. The value shall be in the range of 0 to 1, inclusive and
* in multiples of 1/(height of Picture - 1). The value 1 corresponds
* to the absolute coordinate of height of Picture - 1. The value for
* first processing window shall be 0.
*/
AVRational window_upper_left_corner_y;
/**
* The relative x coordinate of the bottom right pixel of the processing
* window. The value shall be in the range of 0 to 1, inclusive and
* in multiples of 1/(width of Picture - 1). The value 1 corresponds
* to the absolute coordinate of width of Picture - 1. The value for
* first processing window shall be 1.
*/
AVRational window_lower_right_corner_x;
/**
* The relative y coordinate of the bottom right pixel of the processing
* window. The value shall be in the range of 0 to 1, inclusive and
* in multiples of 1/(height of Picture - 1). The value 1 corresponds
* to the absolute coordinate of height of Picture - 1. The value for
* first processing window shall be 1.
*/
AVRational window_lower_right_corner_y;
/**
* The x coordinate of the center position of the concentric internal and
* external ellipses of the elliptical pixel selector in the processing
* window. The value shall be in the range of 0 to (width of Picture - 1),
* inclusive and in multiples of 1 pixel.
*/
uint16_t center_of_ellipse_x;
/**
* The y coordinate of the center position of the concentric internal and
* external ellipses of the elliptical pixel selector in the processing
* window. The value shall be in the range of 0 to (height of Picture - 1),
* inclusive and in multiples of 1 pixel.
*/
uint16_t center_of_ellipse_y;
/**
* The clockwise rotation angle in degree of arc with respect to the
* positive direction of the x-axis of the concentric internal and external
* ellipses of the elliptical pixel selector in the processing window. The
* value shall be in the range of 0 to 180, inclusive and in multiples of 1.
*/
uint8_t rotation_angle;
/**
* The semi-major axis value of the internal ellipse of the elliptical pixel
* selector in amount of pixels in the processing window. The value shall be
* in the range of 1 to 65535, inclusive and in multiples of 1 pixel.
*/
uint16_t semimajor_axis_internal_ellipse;
/**
* The semi-major axis value of the external ellipse of the elliptical pixel
* selector in amount of pixels in the processing window. The value
* shall not be less than semimajor_axis_internal_ellipse of the current
* processing window. The value shall be in the range of 1 to 65535,
* inclusive and in multiples of 1 pixel.
*/
uint16_t semimajor_axis_external_ellipse;
/**
* The semi-minor axis value of the external ellipse of the elliptical pixel
* selector in amount of pixels in the processing window. The value shall be
* in the range of 1 to 65535, inclusive and in multiples of 1 pixel.
*/
uint16_t semiminor_axis_external_ellipse;
/**
* Overlap process option indicates one of the two methods of combining
* rendered pixels in the processing window in an image with at least one
* elliptical pixel selector. For overlapping elliptical pixel selectors
* in an image, overlap_process_option shall have the same value.
*/
enum AVHDRPlusOverlapProcessOption overlap_process_option;
/**
* The maximum of the color components of linearized RGB values in the
* processing window in the scene. The values should be in the range of 0 to
* 1, inclusive and in multiples of 0.00001. maxscl[ 0 ], maxscl[ 1 ], and
* maxscl[ 2 ] correspond to the R, G and B color components, respectively.
*/
AVRational maxscl[3];
/**
* The average of linearized maxRGB values in the processing window in the
* scene. The value should be in the range of 0 to 1, inclusive and in
* multiples of 0.00001.
*/
AVRational average_maxrgb;
/**
* The number of linearized maxRGB values at given percentiles in the
* processing window in the scene. The maximum value shall be 15.
*/
uint8_t num_distribution_maxrgb_percentiles;
/**
* The linearized maxRGB values at given percentiles in the
* processing window in the scene.
*/
AVHDRPlusPercentile distribution_maxrgb[15];
/**
* The fraction of selected pixels in the image that contains the brightest
* pixel in the scene. The value shall be in the range of 0 to 1, inclusive
* and in multiples of 0.001.
*/
AVRational fraction_bright_pixels;
/**
* This flag indicates that the metadata for the tone mapping function in
* the processing window is present (for value of 1).
*/
uint8_t tone_mapping_flag;
/**
* The x coordinate of the separation point between the linear part and the
* curved part of the tone mapping function. The value shall be in the range
* of 0 to 1, excluding 0 and in multiples of 1/4095.
*/
AVRational knee_point_x;
/**
* The y coordinate of the separation point between the linear part and the
* curved part of the tone mapping function. The value shall be in the range
* of 0 to 1, excluding 0 and in multiples of 1/4095.
*/
AVRational knee_point_y;
/**
* The number of the intermediate anchor parameters of the tone mapping
* function in the processing window. The maximum value shall be 15.
*/
uint8_t num_bezier_curve_anchors;
/**
* The intermediate anchor parameters of the tone mapping function in the
* processing window in the scene. The values should be in the range of 0
* to 1, inclusive and in multiples of 1/1023.
*/
AVRational bezier_curve_anchors[15];
/**
* This flag shall be equal to 0 in bitstreams conforming to this version of
* this Specification. Other values are reserved for future use.
*/
uint8_t color_saturation_mapping_flag;
/**
* The color saturation gain in the processing window in the scene. The
* value shall be in the range of 0 to 63/8, inclusive and in multiples of
* 1/8. The default value shall be 1.
*/
AVRational color_saturation_weight;
} AVHDRPlusColorTransformParams;
/**
* This struct represents dynamic metadata for color volume transform -
* application 4 of SMPTE 2094-40:2016 standard.
*
* To be used as payload of an AVFrameSideData or AVPacketSideData with the
* appropriate type.
*
* @note The struct should be allocated with
* av_dynamic_hdr_plus_alloc() and its size is not a part of
* the public ABI.
*/
typedef struct AVDynamicHDRPlus {
/**
* Country code by Rec. ITU-T T.35 Annex A. The value shall be 0xB5.
*/
uint8_t itu_t_t35_country_code;
/**
* Application version in the application defining document in ST-2094
* suite. The value shall be set to 0.
*/
uint8_t application_version;
/**
* The number of processing windows. The value shall be in the range
* of 1 to 3, inclusive.
*/
uint8_t num_windows;
/**
* The color transform parameters for every processing window.
*/
AVHDRPlusColorTransformParams params[3];
/**
* The nominal maximum display luminance of the targeted system display,
* in units of 0.0001 candelas per square metre. The value shall be in
* the range of 0 to 10000, inclusive.
*/
AVRational targeted_system_display_maximum_luminance;
/**
* This flag shall be equal to 0 in bit streams conforming to this version
* of this Specification. The value 1 is reserved for future use.
*/
uint8_t targeted_system_display_actual_peak_luminance_flag;
/**
* The number of rows in the targeted_system_display_actual_peak_luminance
* array. The value shall be in the range of 2 to 25, inclusive.
*/
uint8_t num_rows_targeted_system_display_actual_peak_luminance;
/**
* The number of columns in the
* targeted_system_display_actual_peak_luminance array. The value shall be
* in the range of 2 to 25, inclusive.
*/
uint8_t num_cols_targeted_system_display_actual_peak_luminance;
/**
* The normalized actual peak luminance of the targeted system display. The
* values should be in the range of 0 to 1, inclusive and in multiples of
* 1/15.
*/
AVRational targeted_system_display_actual_peak_luminance[25][25];
/**
* This flag shall be equal to 0 in bitstreams conforming to this version of
* this Specification. The value 1 is reserved for future use.
*/
uint8_t mastering_display_actual_peak_luminance_flag;
/**
* The number of rows in the mastering_display_actual_peak_luminance array.
* The value shall be in the range of 2 to 25, inclusive.
*/
uint8_t num_rows_mastering_display_actual_peak_luminance;
/**
* The number of columns in the mastering_display_actual_peak_luminance
* array. The value shall be in the range of 2 to 25, inclusive.
*/
uint8_t num_cols_mastering_display_actual_peak_luminance;
/**
* The normalized actual peak luminance of the mastering display used for
* mastering the image essence. The values should be in the range of 0 to 1,
* inclusive and in multiples of 1/15.
*/
AVRational mastering_display_actual_peak_luminance[25][25];
} AVDynamicHDRPlus;
/**
* Allocate an AVDynamicHDRPlus structure and set its fields to
* default values. The resulting struct can be freed using av_freep().
*
* @return An AVDynamicHDRPlus filled with default values or NULL
* on failure.
*/
AVDynamicHDRPlus *av_dynamic_hdr_plus_alloc(size_t *size);
/**
* Allocate a complete AVDynamicHDRPlus and add it to the frame.
* @param frame The frame to which the side data is added.
*
* @return The AVDynamicHDRPlus structure to be filled by caller or NULL
* on failure.
*/
AVDynamicHDRPlus *av_dynamic_hdr_plus_create_side_data(AVFrame *frame);
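/*
 * Sketch of attaching HDR10+ metadata to a caller-owned "frame"; the field
 * values below are arbitrary illustrations, not meaningful mastering data:
 *
 * @code
 * AVDynamicHDRPlus *hdr = av_dynamic_hdr_plus_create_side_data(frame);
 * if (!hdr)
 *     return AVERROR(ENOMEM);
 * hdr->itu_t_t35_country_code = 0xB5;   // required value per the field's doc
 * hdr->application_version    = 0;
 * hdr->num_windows            = 1;
 * hdr->targeted_system_display_maximum_luminance = (AVRational){ 1000, 1 };
 * @endcode
 */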
#endif /* AVUTIL_HDR_DYNAMIC_METADATA_H */

Some files were not shown because too many files have changed in this diff