imageConversionContext = sws_getCachedContext(null,
    cameraCodecContext.width, cameraCodecContext.height, cameraCodecContext.pix_fmt,
    cameraCodecContext.width, cameraCodecContext.height, AVPixelFormat.AV_PIX_FMT_BGR24,
    SWS_BICUBIC, null, null, null);
if (imageConversionContext == null) return;

if (av_read_frame(cameraFormatContext, &cameraPacket) >= 0)
{
    if (cameraPacket.stream_index == cameraVideo)
    {
        avcodec_decode_video2(cameraCodecContext, rawFrame, &isFrameFinished, &cameraPacket);
        if (isFrameFinished)
        {
            if (rawFrame.data.ptr == null || rawFrame.linesize.ptr == null
                || convertedFrame.data.ptr == null || convertedFrame.linesize.ptr == null) return;

            sws_scale(imageConversionContext, rawFrame.data, rawFrame.linesize, 0,
                cameraCodecContext.height, data, linesize);
            video.loadData(convertedFrame.data[0], 1280, 720, 2);
        }
    }
}
data and linesize are set up by hand like this (there's an av_image_alloc comparison sketched below the gist link):

const rowStride = width * 3;
const bufferSize = height * rowStride;
ubyte*[] data = [ new ubyte[rowStride] ];
int[] linesize = [ rowStride ];
Hmm. Do you see anything wrong with this piece of code in particular?
https://gist.github.com/aaronhyperum/1ba524f9f21b66b2973a752d50b888cc
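For comparison, here's a minimal sketch of how I think the destination buffer would be set up with av_image_alloc from libavutil instead of my hand-rolled data/linesize arrays. The exact D binding signature (pointers vs. arrays) is a guess on my part and may not match what's in the gist:

// Sketch only, assuming the same libav D bindings as above and that
// av_image_alloc is declared with plain pointer parameters.
ubyte*[4] dstData;      // BGR24 is packed, so only dstData[0] ends up used
int[4] dstLinesize;

int ret = av_image_alloc(dstData.ptr, dstLinesize.ptr,
    cameraCodecContext.width, cameraCodecContext.height,
    AVPixelFormat.AV_PIX_FMT_BGR24, 1);
if (ret < 0) return;    // allocation failed

// sws_scale would then write the whole converted frame into dstData/dstLinesize,
// and dstData[0] is what would get handed to video.loadData.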