Sending ImageStream frames to OpenCV fails - Flutter

The problem: while using the camera and passing frames via ImageStream,
I keep getting src->data == nullptr from the C++ (OpenCV) code when invoking the imdecode function. Why?
I have debugged the issue and found that in the getBytes function
I get:
img.height = 480
img.width = 720
In my opinion each plane should be of length 345600 (480*720)
but I got:
img.planes[0].bytes.length = 368592
img.planes[1].bytes.length = 184271
img.planes[2].bytes.length = 184271
How can it be that the image's planes don't have the same length?
The following block is from the Flutter app:
initCamera() {
  cameraController = CameraController(cameras[0], ResolutionPreset.medium);
  cameraController.initialize().then((value) {
    if (!mounted) return;
    setState(() {
      cameraController.startImageStream((imageStream) {
        cameraImage = imageStream;
        print("Frame number: $frameCounter");
        frameCounter++;
        res = imageMetaData.applyConvertToMatrix(cameraImage);
        // runModel();
      });
    });
  });
}
Calling the C++ code via FFI:
Uint8List getBytes(CameraImage img) {
  final WriteBuffer allBytes = WriteBuffer();
  // Write all the values from a Uint8List into the buffer.
  img.planes.forEach((plane) => allBytes.putUint8List(plane.bytes));
  return allBytes.done().buffer.asUint8List();
}

Pointer<Uint8> intListToArray(Uint8List list) {
  final Pointer<Uint8> ptr = calloc.allocate<Uint8>(list.length);
  for (var i = 0; i < list.length; i++) {
    ptr.elementAt(i).value = list[i];
  }
  return ptr;
}
Future<bool> applyConvertToMatrix(CameraImage img) async {
  func_convert_to_matrix = nativeAddLib
      .lookup<NativeFunction<Pointer<Void> Function(Pointer<Uint8>, Pointer<Int32>)>>(
          "convert_from_img_array_to_matrix")
      .asFunction();
  Uint8List bytes = getBytes(img);
  Pointer<Void> ptr;
  Pointer<Uint8> imgBytes;
  imgByteLength.value = bytes.length;
  imgBytes = intListToArray(bytes);
  ptr = func_convert_to_matrix(imgBytes, imgByteLength); // invoking the C++ code
  return true;
}
The C++ code:
void *convert_from_img_array_to_matrix(
    unsigned char *img,
    int32_t *imgLengthBytes) {
  cv::Mat *src = new cv::Mat();
  int32_t len_bytes = *imgLengthBytes;
  std::vector<unsigned char> m;
  __android_log_print(ANDROID_LOG_VERBOSE, "NATIVE", "Len bytes: %d", len_bytes);
  while (len_bytes > 0)
  {
    m.push_back(*(img++));
    len_bytes--;
  }
  *src = cv::imdecode(m, cv::IMREAD_UNCHANGED);
  if (src->data == nullptr)
  {
    __android_log_print(ANDROID_LOG_VERBOSE, "NATIVE", "You reached nullptr");
    return nullptr;
  }
  if (DEBUG_NATIVE)
    __android_log_print(ANDROID_LOG_VERBOSE, "NATIVE",
                        "convert_from_img_array_to_matrix() --- len before:%d len after:%d width:%d height:%d",
                        *imgLengthBytes, src->step[0] * src->rows,
                        src->cols, src->rows);
  *imgLengthBytes = src->step[0] * src->rows;
  return src;
}
Can someone suggest what I am doing wrong?
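For context, cv::imdecode() only decodes compressed formats such as JPEG or PNG; raw YUV planes are not an encoded image, so an empty Mat (src->data == nullptr) is the expected result here. The reported plane lengths also suggest each row is padded to a stride of 768 bytes rather than 720 (768 * 480 - 48 = 368592), so the buffer is not even tightly packed. Below is a minimal sketch of the alternative on the native side, assuming the Dart code were changed to hand over tightly packed I420 data (Y, then U, then V, with no row padding); the function name and parameters are illustrative only:

#include <opencv2/opencv.hpp>

// Sketch: wrap the raw I420 buffer in a Mat and convert it, instead of calling imdecode().
cv::Mat *convert_raw_i420_to_bgr(unsigned char *img, int width, int height) {
  // COLOR_YUV2BGR_I420 expects a single-channel Mat with height * 3/2 rows.
  cv::Mat yuv(height + height / 2, width, CV_8UC1, img);
  cv::Mat *bgr = new cv::Mat();
  cv::cvtColor(yuv, *bgr, cv::COLOR_YUV2BGR_I420);
  return bgr;  // caller owns the allocation, as in the original code
}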

Related

Defining an async call in initState in Flutter

In the code below I am trying to call the method checkStatus() with the values I get from the provider, but since the provider's data loads with a delay I am not able to call the method in initState. Is there an alternative way I could use here?
@override
void initState() {
  // TODO: implement initState
  super.initState();
  final interestDetail = Provider.of<InterestProvider>(context, listen: false);
  interestDetail.getData(context);
  checkStatus(interestDetail.interest.subcategoryData, interestDetail.interest.customerSelectedSubcatIds);
}

List<int> checkStatus(a, c) {
  for (var i = 0, len1 = a.length; i < len1; i++) {
    for (var j = 0, len2 = c.length; j < len2; j++) {
      if (a[i].id == c[j]) {
        _selectedIndexList.add(j);
        _selectedCatList.add(j + 1);
        check = true;
        break;
      }
    }
  }
  print(check);
  return _selectedIndexList;
}
You can modify your source code as below. Future.delayed(Duration.zero, ...) schedules the call on the next event-loop turn, i.e. after initState has finished:
@override
void initState() {
  // TODO: implement initState
  super.initState();
  Future.delayed(Duration.zero, () {
    final interestDetail = Provider.of<InterestProvider>(context, listen: false);
    interestDetail.getData(context);
    checkStatus(interestDetail.interest.subcategoryData, interestDetail.interest.customerSelectedSubcatIds);
  });
}

List<int> checkStatus(a, c) {
  for (var i = 0, len1 = a.length; i < len1; i++) {
    for (var j = 0, len2 = c.length; j < len2; j++) {
      if (a[i].id == c[j]) {
        _selectedIndexList.add(j);
        _selectedCatList.add(j + 1);
        check = true;
        break;
      }
    }
  }
  print(check);
  return _selectedIndexList;
}

Process camera image in Flutter

I'm trying to process the camera image input in Flutter but I can't seem to get it to work.
I have a listener on the camera feed that launches a function which is supposed to process the images, but every time I try to do anything in that function the interface freezes. My guess is that I have too many frames to process and that is why the program freezes, but I have no idea how to ignore old frames.
Here is my code so far
import 'package:flutter/material.dart';
import 'package:camera/camera.dart';
import 'package:image/image.dart' as imglib;
import 'package:flutter/scheduler.dart';
import 'user_data_container.dart';
class Camera extends StatefulWidget {
Camera();
@override
_CameraState createState() => new _CameraState();
}
class _CameraState extends State<Camera> {
CameraController controller;
bool isDetecting = false;
double redavg;
imglib.Image last_image;
int count = 0;
Image _image_display;
@override
void initState() {
super.initState();
SchedulerBinding.instance.addPostFrameCallback((_) {
_initializeApp();
});
}
void _initializeApp() async {
var cameras = UserDataContainer.of(context).data.cameras;
if (cameras == null || cameras.length < 1) {
print('No camera is found');
} else {
controller = new CameraController(
cameras[0],
ResolutionPreset.high,
);
controller.initialize().then((_) {
if (!mounted) {
return;
}
setState(() {});
controller.startImageStream((CameraImage img) async {
print(count.toString() + "Stream. detecting: " + isDetecting.toString());
if (!isDetecting) {
isDetecting = true;
int startTime = new DateTime.now().millisecondsSinceEpoch;
_processCameraImage(img);
// int endTime = new DateTime.now().millisecondsSinceEpoch;
// print("Detection done in " + (endTime - startTime).toString());
// isDetecting = false;
}
else{
print("It's detecting");
}
});
});
}
}
@override
void dispose() {
controller?.dispose();
super.dispose();
}
@override
Widget build(BuildContext context) {
if (controller == null || !controller.value.isInitialized) {
return Container();
}
return
Scaffold(
body: Column(
children: <Widget>[
Text("red: "+ redavg.toString() + " "+ count.toString()),
Expanded(child: last_image == null ? Container() : _image_display),
],
),
);
// );
}
void _processCameraImage(CameraImage image) async {
count = count +1;
print("p: " + image.planes.length.toString());
// if (isDetecting) {
// print("Already detecting;");
// return;
// }
// else{
// isDetecting = true;
try {
await processFrame(image);
} catch (e) {
// await handleExepction(e)
} finally {
print("Done detecting :)");
// isDetecting = false;
}
// }
}
Future<void> processFrame(CameraImage image) async {
print ("convert");
imglib.Image convertedImage = await _convertCameraImage(image);
last_image = convertedImage;
imglib.PngEncoder pngEncoder = new imglib.PngEncoder(level: 0, filter: 0);
// Convert to png
List<int> png = pngEncoder.encodeImage(last_image);
_image_display = Image.memory(png);
// var colors = colorAverage(convertedImage);
setState(() {
// redavg = colors[2];
// print(colors.toString());
});
}
imglib.Image _convertCameraImage(CameraImage image) {
int width = image.width;
int height = image.height;
var img = imglib.Image(image.planes[0].bytesPerRow, height); // Create Image buffer
const int hexFF = 0xFF000000;
final int uvyButtonStride = image.planes[1].bytesPerRow;
final int uvPixelStride = image.planes[1].bytesPerPixel;
for (int x = 0; x < width; x++) {
for (int y = 0; y < height; y++) {
final int uvIndex =
uvPixelStride * (x / 2).floor() + uvyButtonStride * (y / 2).floor();
final int index = y * width + x;
final yp = image.planes[0].bytes[index];
final up = image.planes[1].bytes[uvIndex];
final vp = image.planes[2].bytes[uvIndex];
// Calculate pixel color
int r = (yp + vp * 1436 / 1024 - 179).round().clamp(0, 255);
int g = (yp - up * 46549 / 131072 + 44 - vp * 93604 / 131072 + 91)
.round()
.clamp(0, 255);
int b = (yp + up * 1814 / 1024 - 227).round().clamp(0, 255);
// color: 0x FF FF FF FF
// A B G R
img.data[index] = hexFF | (b << 16) | (g << 8) | r;
}
}
// Rotate 90 degrees to upright
// var img1 = imglib.copyRotate(img, 90);
imglib.PngEncoder pngEncoder = new imglib.PngEncoder(level: 0, filter: 0);
// Convert to png
List<int> png = pngEncoder.encodeImage(img);
_image_display = Image.memory(png);
return img;
}
}

MVVM WriteableBitmap image processing

The idea is to process a loaded image. As a first step I wanted to process it so that a white/black line moves through the image.
The actual problem is that a new thread (for Thread.Sleep) freezes the whole UI while processing, and when I try a Dispatcher, the image doesn't update.
So I have a ProcessingClass:
public WriteableBitmap GetProcessedPicture(int i)
{
    WriteableBitmap wb = image.image;
    Process(ref wb, i);
    return wb;
}

private void Process(ref WriteableBitmap wb, int j)
{
    int stride = wb.PixelWidth * (wb.Format.BitsPerPixel + 7) / 8;
    byte[] data = new byte[stride * wb.PixelHeight];
    for (int i = 0; i < data.Length; i++) data[i] = 255; // just to see some changes
    wb.WritePixels(new Int32Rect(j, 0, 100, 100), data, stride, 0);
}
And the MainViewModel class (I'm using MVVM light) with:
public void Start_Click()
{
    Application.Current.Dispatcher.BeginInvoke(new Action(() =>
    {
        for (int i = 0; i < 100; i++)
        {
            ImageSource = processingClass.GetProcessedPicture(i);
            Thread.Sleep(100);
        }
    }));
}
Thanks for the help.
I don't think ImageSource (assuming it binds to Image.Source) accepts a WriteableBitmap. You have to convert it to a BitmapImage first!
Adjusting your code a bit I'd do something like:
public void Start_Click()
{
    var t = new Task(() =>
    {
        for (var i = 0; i < 100; i++)
        {
            var writeablebitmap = processingClass.GetProcessedPicture(i);
            var bitmapImage = processingClass.ConvertToBitmapImage(writeablebitmap);
            Application.Current.Dispatcher.BeginInvoke(new Action(() =>
            {
                ImageSource = bitmapImage;
            }));
            Thread.Sleep(100);
        }
    });
    t.Start();
}
This way you're not stalling the UI thread with your image processing, but you still update the image on the UI thread.
As for converting your WriteableBitmap to a BitmapImage, there are plenty of resources available on the internet.

(OpenCV) merge contours together

I am writing a real-time motion detection program. I find that a lot of contours are produced in my difference image after using the background subtraction method. I would like to ask: is there any method that can merge these contours together, or make a larger rect that contains all of them?
This is the result I have so far:
http://singhgaganpreet.files.wordpress.com/2012/07/motioncolour.jpg
My code is here
#include <iostream>
#include <OpenCV/cv.h>
#include <OpenCV/highgui.h>
using namespace cv;
using namespace std;
CvRect rect;
CvSeq* contours = 0;
CvMemStorage* storage = NULL;
CvCapture *cam;
IplImage *currentFrame, *currentFrame_grey, *differenceImg, *oldFrame_grey;
bool first = true;
int main(int argc, char* argv[])
{
//Create a new movie capture object.
cam = cvCaptureFromCAM(0);
//create storage for contours
storage = cvCreateMemStorage(0);
//capture current frame from webcam
currentFrame = cvQueryFrame(cam);
//Size of the image.
CvSize imgSize;
imgSize.width = currentFrame->width;
imgSize.height = currentFrame->height;
//Images to use in the program.
currentFrame_grey = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
while(1)
{
currentFrame = cvQueryFrame( cam );
if( !currentFrame ) break;
//Convert the image to grayscale.
cvCvtColor(currentFrame,currentFrame_grey,CV_RGB2GRAY);
if(first) //Capturing Background for the first time
{
differenceImg = cvCloneImage(currentFrame_grey);
oldFrame_grey = cvCloneImage(currentFrame_grey);
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
first = false;
continue;
}
//Minus the current frame from the moving average.
cvAbsDiff(oldFrame_grey,currentFrame_grey,differenceImg);
//blurring the difference image
cvSmooth(differenceImg, differenceImg, CV_BLUR);
//apply threshold to discard small unwanted movements
cvThreshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);
//find contours
cvFindContours( differenceImg, storage, &contours );
//draw bounding box around each contour
for(; contours!=0; contours = contours->h_next)
{
rect = cvBoundingRect(contours, 0); //extract bounding box for current contour
//drawing rectangle
cvRectangle(currentFrame,
cvPoint(rect.x, rect.y),
cvPoint(rect.x+rect.width, rect.y+rect.height),
cvScalar(0, 0, 255, 0),
2, 8, 0);
}
//display colour image with bounding box
cvShowImage("Output Image", currentFrame);
//display threshold image
cvShowImage("Difference image", differenceImg);
//New Background
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
//clear memory and contours
cvClearMemStorage( storage );
contours = 0;
//press Esc to exit
char c = cvWaitKey(33);
if( c == 27 ) break;
}
// Destroy the image & movies objects
cvReleaseImage(&oldFrame_grey);
cvReleaseImage(&differenceImg);
cvReleaseImage(&currentFrame);
cvReleaseImage(&currentFrame_grey);
//cvReleaseCapture(&cam);
return 0;
}
Did you try this?
std::vector<cv::Point> points;
points.insert(points.end(), contour1.begin(), contour1.end());
points.insert(points.end(), contour2.begin(), contour2.end());
convexHull(cv::Mat(points), contour);
PS. For some applications it may be better to use approxPolyDP() rather than convexHull(). Just try both.
PPS. Try smoothing the resulting contour with a Gaussian; it can also help.
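For reference, a self-contained sketch of that approach with the C++ API (the function and variable names are illustrative):

#include <opencv2/opencv.hpp>

// Merge every detected contour into one convex hull and take its bounding box.
cv::Rect mergeIntoOneBox(const cv::Mat &binaryImage)
{
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(binaryImage, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    std::vector<cv::Point> allPoints;
    for (const auto &c : contours)
        allPoints.insert(allPoints.end(), c.begin(), c.end());

    if (allPoints.empty())
        return cv::Rect();              // nothing detected

    std::vector<cv::Point> hull;
    cv::convexHull(allPoints, hull);
    return cv::boundingRect(hull);      // one rect covering every contour
}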
I came across a similar problem. In my case I created an empty sequence, filled it with the points of each contour, and then fitted a bounding ellipse to that sequence.
Here is my code segment...
CvMemStorage *storage = cvCreateMemStorage();
CvMemStorage *storage1 = cvCreateMemStorage();
CvSeq *contours = 0;

// find contours in BInv
cvFindContours(BInv, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

// create an empty sequence of CvPoint
CvSeq *seq = cvCreateSeq(CV_SEQ_ELTYPE_POINT /*| CV_SEQ_KIND_SET | CV_SEQ_FLAG_SIMPLE*/, sizeof(CvSeq), sizeof(CvPoint), storage1);

// populate seq with the points of all contours
for (; contours != 0; contours = contours->h_next)
    for (int i = 0; i < contours->total; i++)
    {
        CvPoint *p = (CvPoint *)cvGetSeqElem(contours, i);
        cvSeqPush(seq, p);
    }

// bounding box and drawing
CvBox2D bbox = cvMinAreaRect2(seq, NULL);
cvEllipseBox(color, bbox, cvScalarAll(0), 5, 8, 0);
Hope this helps.
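The same idea translated to the C++ API, for anyone not on the legacy C interface (a sketch; BInv and color are assumed to be the binary mask and the output image from the snippet above):

std::vector<std::vector<cv::Point>> contours;
cv::findContours(BInv, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);

std::vector<cv::Point> seq;                 // all points from all contours
for (const auto &c : contours)
    seq.insert(seq.end(), c.begin(), c.end());

if (!seq.empty())
{
    cv::RotatedRect bbox = cv::minAreaRect(seq);
    cv::ellipse(color, bbox, cv::Scalar::all(0), 5, 8);
}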
If you want to merge contours on the basis of how far apart they are, you can do something like this:
struct hash_pair {
template <class T1, class T2>
size_t operator()(const pair<T1, T2>& p) const
{
auto hash1 = hash<T1>{}(p.first);
auto hash2 = hash<T2>{}(p.second);
if (hash1 != hash2) {
return hash1 ^ hash2;
}
return hash1;
}
};
void findPixelsNearby(unordered_map<pair<int, int>,bool,hash_pair>&res, Point px,int pxlVal) {
for (int itr1 = (px.x) - pxlVal; itr1 <= (px.x) + pxlVal; itr1++) {
for (int itr2 = (px.y - pxlVal); itr2 <= (px.y) + pxlVal; itr2++) {
res[{itr1, itr2}] = true;
}
}
}
unordered_map<pair<int, int>, bool, hash_pair> createSets(vector<Point2f>Contour, int rect) {
unordered_map<pair<int,int>,bool,hash_pair>res;
for (auto tra : Contour) {
Point px = (Point)tra;
findPixelsNearby(res,px,rect);
}
return res;
}
//void drawContour(Mat& img, vector<Point2f>s1,int px,int py,int pz) {
// for (auto x : s1) {
// line(img, x, x, Scalar(px, py, pz), 4, 0);
//
// }
// resShow("temp",img,1);
//}
bool hasCommon(unordered_map<pair<int,int>,bool,hash_pair>s1, unordered_map<pair<int, int>, bool, hash_pair>s2){
for (auto x : s1) {
if (s2.find(x.first) != s2.end()) {
return true;
}
}
return false;
}
void MergeContours(Mat image, vector<Contour>&usableContours,int distance_considered, vector<Contour>& finalContours) {
int numberContours = usableContours.size();
vector<vector<int>>ids_for_contour_merge(numberContours);
vector<unordered_map<pair<int, int>, bool, hash_pair>>ContourSets;
vector<bool>boolVals(numberContours,false);
for (int i = 0; i < numberContours; i++) {
ContourSets.push_back(createSets(usableContours[i].points, distance_considered/2));
}
for (int i = 0; i < numberContours; i++) {
if (boolVals[i] == false) {
boolVals[i] = true;
for (int j = i+1; j < numberContours; j++) {
if (boolVals[j] == false) {
if(hasCommon(ContourSets[i], ContourSets[j])==true){
ContourSets[i].insert(ContourSets[j].begin(), ContourSets[j].end());
boolVals[j] = true;
ids_for_contour_merge[i].push_back(j);
j = i;
}
}
}
}
}
vector<bool>Visited(ids_for_contour_merge.size(), false);
for (int mr = 0; mr < ids_for_contour_merge.size(); mr++) {
if (Visited[mr] == false) {
vector<Point2f>temp=usableContours[mr].points;
if (ids_for_contour_merge[mr].size() > 0) {
for (int mc = 0; mc < ids_for_contour_merge[mr].size(); mc++) {
int valPtr = ids_for_contour_merge[mr][mc];
copy(usableContours[valPtr].points.begin(), usableContours[valPtr].points.end(), std::back_inserter(temp));
Visited[valPtr] = true;
}
}
else {
Visited[mr] = true;
}
Contour newCtr;
newCtr.points = temp;
finalContours.push_back(newCtr);
}
}
///////////////////////////////////////////////////////////////DRAWING CONTOURS
/*for (auto x : finalContours) {
cout <<"CONTOURS FINAL SIZE IS : " <<x.points.size()<<endl;
int px = 0;
int py = 0;
int pz = 0;
drawContour(image, x.points, ((px+rand())%255), ((py + rand()) % 255), ((pz + rand()) % 255));
}*/
//////////////////////////////////////////////////////////////////////////////
}
More on GitHub: https://github.com/HimanshuYadav117/Merge-Contours/blob/main/MergeContours.cpp
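A hypothetical call site, assuming Contour is a small struct holding the points as std::vector<cv::Point2f> (which is what the code above implies); the 20 px threshold is arbitrary:

struct Contour { std::vector<cv::Point2f> points; };   // assumed definition, as implied above

std::vector<std::vector<cv::Point>> raw;
cv::findContours(mask, raw, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

std::vector<Contour> usable;
for (const auto &c : raw) {
    Contour ctr;
    for (const auto &p : c)
        ctr.points.push_back(cv::Point2f(p.x, p.y));
    usable.push_back(ctr);
}

std::vector<Contour> merged;
MergeContours(image, usable, 20 /*distance_considered*/, merged);
// merged now groups contours whose points come within roughly 20 px of each other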

JPEG streaming with live555

I want to stream JPEG images or a motion-JPEG file through live555, but the problem is that an implementation for JPEGs is not available in live555. Can anyone help?
You can find an implementation that was posted to the devel mailing list: http://lists.live555.com/pipermail/live-devel/2012-February/014672.html
The code and a sample are available, but this modification was rejected by the live555 maintainer.
First we need to implement an MJPEGVideoSource that can feed a JPEGVideoRTPSink.
#include "JPEGVideoSource.hh"
class MJPEGVideoSource : public JPEGVideoSource
{
public:
static MJPEGVideoSource* createNew (UsageEnvironment& env, FramedSource* source)
{
return new MJPEGVideoSource(env,source);
}
virtual void doGetNextFrame()
{
if (m_inputSource)
m_inputSource->getNextFrame(fTo, fMaxSize, afterGettingFrameSub, this, FramedSource::handleClosure, this);
}
virtual void doStopGettingFrames()
{
FramedSource::doStopGettingFrames();
if (m_inputSource)
m_inputSource->stopGettingFrames();
}
static void afterGettingFrameSub(void* clientData, unsigned frameSize,unsigned numTruncatedBytes,struct timeval presentationTime,unsigned durationInMicroseconds)
{
MJPEGVideoSource* source = (MJPEGVideoSource*)clientData;
source->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}
void afterGettingFrame(unsigned frameSize,unsigned numTruncatedBytes,struct timeval presentationTime,unsigned durationInMicroseconds)
{
int headerSize = 0;
bool headerOk = false;
fFrameSize = 0;
for (unsigned int i = 0; i < frameSize ; ++i)
{
// SOF
if ( (i+8) < frameSize && fTo[i] == 0xFF && fTo[i+1] == 0xC0 )
{
m_height = (fTo[i+5]<<5)|(fTo[i+6]>>3);
m_width = (fTo[i+7]<<5)|(fTo[i+8]>>3);
}
// DQT
if ( (i+5+64) < frameSize && fTo[i] == 0xFF && fTo[i+1] == 0xDB)
{
if (fTo[i+4] ==0)
{
memcpy(m_qTable, fTo + i + 5, 64);
m_qTable0Init = true;
}
else if (fTo[i+4] ==1)
{
memcpy(m_qTable + 64, fTo + i + 5, 64);
m_qTable1Init = true;
}
}
// End of header
if ( (i+1) < frameSize && fTo[i] == 0x3F && fTo[i+1] == 0x00 )
{
headerOk = true;
headerSize = i+2;
break;
}
}
if (headerOk)
{
fFrameSize = frameSize - headerSize;
memmove( fTo, fTo + headerSize, fFrameSize );
}
fNumTruncatedBytes = numTruncatedBytes;
fPresentationTime = presentationTime;
fDurationInMicroseconds = durationInMicroseconds;
afterGetting(this);
}
virtual u_int8_t type() { return 1; };
virtual u_int8_t qFactor() { return 128; };
virtual u_int8_t width() { return m_width; };
virtual u_int8_t height() { return m_height; };
u_int8_t const* quantizationTables( u_int8_t& precision, u_int16_t& length )
{
length = 0;
precision = 0;
if ( m_qTable0Init && m_qTable1Init )
{
precision = 8;
length = sizeof(m_qTable);
}
return m_qTable;
}
protected:
MJPEGVideoSource(UsageEnvironment& env, FramedSource* source) : JPEGVideoSource(env),
m_inputSource(source),
m_width(0),
m_height(0),
m_qTable0Init(false),
m_qTable1Init(false)
{
memset(&m_qTable,0,sizeof(m_qTable));
}
virtual ~MJPEGVideoSource()
{
Medium::close(m_inputSource);
}
protected:
FramedSource* m_inputSource;
u_int8_t m_width;
u_int8_t m_height;
u_int8_t m_qTable[128];
bool m_qTable0Init;
bool m_qTable1Init;
};
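As an aside, the bit shifts in the SOF branch above simply read the 16-bit big-endian height and width and divide them by 8, because RTP/JPEG (RFC 2435) carries frame dimensions in units of 8-pixel blocks; a more explicit equivalent would be:

// Inside the SOF branch: read the 16-bit big-endian dimensions, then divide by 8.
u_int16_t heightPx = (fTo[i + 5] << 8) | fTo[i + 6];
u_int16_t widthPx  = (fTo[i + 7] << 8) | fTo[i + 8];
m_height = heightPx / 8;   // same value as (fTo[i+5]<<5)|(fTo[i+6]>>3)
m_width  = widthPx  / 8;   // same value as (fTo[i+7]<<5)|(fTo[i+8]>>3)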
Next we can use it as a video source in order to build a simple RTSP server:
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"
#include "GroupsockHelper.hh"
#include "MJPEGVideoSource.hh"
char const* inputFileName = "test.mjpeg";
int main(int argc, char** argv) {
// Begin by setting up our usage environment:
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
// Create 'groupsocks' for RTP and RTCP:
struct in_addr destinationAddress;
destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
const unsigned short rtpPortNum = 18888;
const unsigned short rtcpPortNum = rtpPortNum+1;
const unsigned char ttl = 255;
const Port rtpPort(rtpPortNum);
const Port rtcpPort(rtcpPortNum);
Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
rtpGroupsock.multicastSendOnly(); // we're a SSM source
Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
rtcpGroupsock.multicastSendOnly(); // we're a SSM source
// Create a 'JPEG Video RTP' sink from the RTP 'groupsock':
RTPSink* videoSink = JPEGVideoRTPSink::createNew(*env, &rtpGroupsock);
// Create (and start) a 'RTCP instance' for this RTP sink:
const unsigned estimatedSessionBandwidth = 5000; // in kbps; for RTCP b/w share
const unsigned maxCNAMElen = 100;
unsigned char CNAME[maxCNAMElen+1];
gethostname((char*)CNAME, maxCNAMElen);
CNAME[maxCNAMElen] = '\0'; // just in case
RTCPInstance* rtcp = RTCPInstance::createNew(*env, &rtcpGroupsock,
estimatedSessionBandwidth, CNAME,
videoSink, NULL /* we're a server */,
True /* we're a SSM source */);
// Note: This starts RTCP running automatically
RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
if (rtspServer == NULL) {
*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
exit(1);
}
ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName,"Session streamed by \"testMJPEGVideoStreamer\"",
True /*SSM*/);
sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
rtspServer->addServerMediaSession(sms);
char* url = rtspServer->rtspURL(sms);
*env << "Play this stream using the URL \"" << url << "\"\n";
delete[] url;
// Start the streaming:
*env << "Beginning streaming...\n";
// Open the input file as a 'byte-stream file source':
ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName);
if (fileSource == NULL) {
*env << "Unable to open file \"" << inputFileName
<< "\" as a byte-stream file source\n";
exit(1);
}
// Create the MJPEG video source:
MJPEGVideoSource* videoSource = MJPEGVideoSource::createNew(*env, fileSource);
// Finally, start playing:
*env << "Beginning to read from file...\n";
videoSink->startPlaying(*videoSource, NULL, NULL);
env->taskScheduler().doEventLoop();
return 0;
}
Hope you have done it already, but if not, see this: Jpeg Streaming using live555.
It does the same thing you are asking for: streaming images/JPEGs.
For MJPEG you'll have to follow the same process.