For detecting faces with the camera I am referring to SquareCam (the Apple iOS sample). But I am unable to show the left eye, right eye, and mouth positions. I am using the following code:
NSArray *sublayers = [NSArray arrayWithArray:[previewLayer sublayers]];
NSInteger sublayersCount = [sublayers count], currentSublayer = 0;
NSInteger featuresCount = [features count], currentFeature = 0;
[CATransaction begin];
[CATransaction setValue:(id)kCFBooleanTrue forKey:kCATransactionDisableActions];
// hide all the face layers
for ( CALayer *layer in sublayers )
{
if ( [[layer name] isEqualToString:@"FaceView"] )
[layer setHidden:YES];
}
if ( featuresCount == 0 || !detectFaces ) {
[CATransaction commit];
return; // early bail.
}
CGSize parentFrameSize = [previewView frame].size;
NSString *gravity = [previewLayer videoGravity];
BOOL isMirrored = [previewLayer isMirrored];
CGRect previewBox = [SquareCamViewController videoPreviewBoxForGravity:gravity
frameSize:parentFrameSize
apertureSize:clap.size];
for ( CIFaceFeature *ff in features ) {
// find the correct position for the square layer within the previewLayer
// the feature box originates in the bottom left of the video frame.
// (Bottom right if mirroring is turned on)
CGRect faceRect = [ff bounds];
CGRect leftEyeFrameRect;
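// flip width/height and swap x/y: the feature box comes from the rotated video frame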
CGFloat temp = faceRect.size.width;
faceRect.size.width = faceRect.size.height;
faceRect.size.height = temp;
temp = faceRect.origin.x;
faceRect.origin.x = faceRect.origin.y;
faceRect.origin.y = temp;
// scale coordinates so they fit in the preview box, which may be scaled
CGFloat widthScaleBy = previewBox.size.width / clap.size.height;
CGFloat heightScaleBy = previewBox.size.height / clap.size.width;
faceRect.size.width *= widthScaleBy;
faceRect.size.height *= heightScaleBy;
faceRect.origin.x *= widthScaleBy;
faceRect.origin.y *= heightScaleBy;
if ( isMirrored )
{
faceRect = CGRectOffset(faceRect, previewBox.origin.x + previewBox.size.width - faceRect.size.width - (faceRect.origin.x * 2), previewBox.origin.y);
}
else
{
faceRect = CGRectOffset(faceRect, previewBox.origin.x, previewBox.origin.y);
leftEyeFrameRect=CGRectOffset(faceRect,ff.leftEyePosition.x, ff.leftEyePosition.y);
}
CALayer *featureLayer = nil;
CALayer *eyeLayer = nil;
// re-use an existing layer if possible
while ( !featureLayer && (currentSublayer < sublayersCount) )
{
CALayer *currentLayer = [sublayers objectAtIndex:currentSublayer++];
if ( [[currentLayer name] isEqualToString:@"FaceLayer"] ) {
featureLayer = currentLayer;
[currentLayer setHidden:NO];
}
}
// create a new one if necessary
if ( !featureLayer ) {
featureLayer = [CALayer new];
[featureLayer setContents:(id)[square CGImage]];
[featureLayer setName:@"FaceLayer"];
[previewLayer addSublayer:featureLayer];
[featureLayer release];
}
[featureLayer setFrame:faceRect];
if (faceView !=nil) {
[faceView removeFromSuperview];
[faceView release];
}
if (leftEyeView != nil) {
[leftEyeView removeFromSuperview];
[leftEyeView release];
}
faceView = [[UIView alloc] initWithFrame:CGRectMake(faceRect.origin.x, faceRect.origin.y ,faceRect.size.width, faceRect.size.height)];
faceView.layer.borderWidth = 1;
faceView.layer.borderColor = [[UIColor redColor] CGColor];
[self.view addSubview:faceView];
leftEyeView = [[UIView alloc] initWithFrame:CGRectMake(faceView.frame.origin.x+(faceView.frame.size.height/2), faceView.frame.origin.y+(faceView.frame.size.height*0.10) ,faceView.frame.size.width*0.40, faceView.frame.size.height*0.40)];
UIImageView *leftEyeImageView = [[UIImageView alloc] initWithImage:[UIImage imageNamed:@"eye.png"]];
leftEyeImageView.frame = CGRectMake(0, 0, faceView.frame.size.width*0.40, faceView.frame.size.height*0.40);
[leftEyeView addSubview:leftEyeImageView];
[self.view addSubview:leftEyeView];
if (ff.hasLeftEyePosition) {
CGPoint leftEyeCenter= ff.leftEyePosition;
UIView *vv= [[UIView alloc] initWithFrame:CGRectMake(leftEyeCenter.x, leftEyeCenter.y, 50, 50)];
vv.center = leftEyeCenter;
vv.layer.borderWidth= 4.0;
vv.layer.borderColor= [[UIColor blackColor]CGColor];
[self.view addSubview:vv];
}
It is detecting the eye but not showing it in the right position. Can anyone help resolve this?
Thanks in advance.
I faced the same problem in the preview when using the front camera, because the preview is mirrored and I could not find any good information on how to scale the coordinates.
The following code is the closest I got. Note that I have an image defined as a property called heartImage, and I assume you are using the Apple sample called SquareCam.
In the method - (void)drawFaceBoxesForFeatures:(NSArray *)features forVideoBox:(CGRect)clap orientation:(UIDeviceOrientation)orientation:
if(ff.hasLeftEyePosition)
{
//swap coordinates
CGFloat leftEyeRectOriginX = ff.leftEyePosition.y;
CGFloat leftEyeRectOriginY = ff.leftEyePosition.x;
CGFloat leftEyeRectWidth = faceRect.size.width*0.3;
CGFloat leftEyeRectHeight = faceRect.size.width*0.3;
//adjust scale
leftEyeRectOriginX *= widthScaleBy;
leftEyeRectOriginY *= heightScaleBy;
NSLog(#"LeftEyePosition: %#", NSStringFromCGPoint(ff.leftEyePosition));
CGRect r = CGRectMake(leftEyeRectOriginX - (leftEyeRectWidth/2) , leftEyeRectOriginY - (leftEyeRectHeight/2), leftEyeRectWidth, leftEyeRectHeight);
if ( isMirrored ){
r = CGRectOffset(r, previewBox.origin.x + previewBox.size.width - (leftEyeRectOriginX*2) - leftEyeRectWidth + faceRect.origin.x, previewBox.origin.y);
NSLog(@"LeftEyeRect mirrored: %@", NSStringFromCGRect(r));
}
else{
r = CGRectOffset(r, previewBox.origin.x, previewBox.origin.y);
}
// re-use an existing layer if possible
CALayer *leftEyeEyeLayer = nil; // assumed local here; it may be an ivar in the original
while ( !leftEyeEyeLayer && (currentSublayer < sublayersCount) ) {
CALayer *currentLayer = [sublayers objectAtIndex:currentSublayer++];
if ( [[currentLayer name] isEqualToString:@"LeftEyeLayer"] ) {
leftEyeEyeLayer = currentLayer;
[currentLayer setHidden:NO];
}
}
// create a new one if necessary
if ( !leftEyeEyeLayer ) {
leftEyeEyeLayer = [CALayer new];
[leftEyeEyeLayer setContents:(id)[heartImage CGImage]];
[leftEyeEyeLayer setName:@"LeftEyeLayer"];
[previewLayer addSublayer:leftEyeEyeLayer];
[leftEyeEyeLayer release];
}
[leftEyeEyeLayer setFrame:r];
}
The same applies for the right eye, with the exception that when mirrored I use: r = CGRectOffset(r, previewBox.origin.x + previewBox.size.width - (rightEyeRectOriginX*2) - rightEyeRectWidth + faceRect.origin.x, previewBox.origin.y);
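For reference, a minimal sketch of the right-eye block under the same assumptions (it mirrors the left-eye code above; the rightEye* names are simply the right-eye counterparts):
if (ff.hasRightEyePosition) {
    // swap coordinates and adjust scale, exactly as for the left eye
    CGFloat rightEyeRectOriginX = ff.rightEyePosition.y * widthScaleBy;
    CGFloat rightEyeRectOriginY = ff.rightEyePosition.x * heightScaleBy;
    CGFloat rightEyeRectWidth = faceRect.size.width * 0.3;
    CGFloat rightEyeRectHeight = faceRect.size.width * 0.3;
    CGRect r = CGRectMake(rightEyeRectOriginX - (rightEyeRectWidth / 2), rightEyeRectOriginY - (rightEyeRectHeight / 2), rightEyeRectWidth, rightEyeRectHeight);
    if ( isMirrored ) {
        r = CGRectOffset(r, previewBox.origin.x + previewBox.size.width - (rightEyeRectOriginX * 2) - rightEyeRectWidth + faceRect.origin.x, previewBox.origin.y);
    }
    else {
        r = CGRectOffset(r, previewBox.origin.x, previewBox.origin.y);
    }
    // then reuse or create a CALayer named @"RightEyeLayer", as with the left eye
}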
The only difference from the sample code is that you first want to hide all the feature layers, so some lines above, my code looks like this:
// hide all the face layers
for ( CALayer *layer in sublayers ) {
if ( [[layer name] isEqualToString:@"FaceLayer"] || [[layer name] isEqualToString:@"LeftEyeLayer"] || [[layer name] isEqualToString:@"RightEyeLayer"] )
[layer setHidden:YES];
}
To be precise, I have trouble only in the live camera preview. When using the method that saves the picture to the library (- (CGImageRef)newSquareOverlayedImageForFeatures:(NSArray *)features inCGImage:(CGImageRef)backgroundImage withOrientation:(UIDeviceOrientation)orientation frontFacing:(BOOL)isFrontFacing), it works correctly by using:
if(ff.hasLeftEyePosition)
{
CGRect r = CGRectMake(ff.leftEyePosition.x-faceWidth*0.15, ff.leftEyePosition.y-faceWidth*0.15, faceWidth*0.3, faceWidth*0.3);
CGContextDrawImage(bitmapContext, r, [rotatedHeartImage CGImage]);
}
Please let me know if and how I should improve my answer.
This is probably caused by mismatched orientations between your input, detector, and output. If the faces are detected, then probably only the output coordinates need to be translated from landscape to portrait or vice versa. Otherwise, take a look here.
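As a rough illustration, a sketch of that translation using the names from the SquareCam code above (this assumes the detector delivers points in the landscape video frame with the origin at the bottom left, as the sample's comments describe):
// Swap axes and scale, as the faceRect handling above does, to move a
// detector point from the landscape video frame into the preview box.
CGPoint p = ff.leftEyePosition;
CGFloat x = p.y * (previewBox.size.width / clap.size.height);
CGFloat y = p.x * (previewBox.size.height / clap.size.width);
CGPoint converted = CGPointMake(previewBox.origin.x + x, previewBox.origin.y + y);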
You could investigate Haar cascade training files and OpenCV, but that is a totally different approach. It supports iOS versions below 6.0 (a pro), but it is much harder than Apple's SquareCam sample (a con). OpenCV is able to detect ears, eyes, etc., and there may already be some training files on the web.
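For example, a minimal OpenCV sketch (my assumption, not from the answer above: this uses the standard cv::CascadeClassifier API from an Objective-C++ file, the cascade file name is illustrative, and grayMat is an assumed grayscale cv::Mat of the current frame):
// Load a Haar cascade bundled with the app and run it on a grayscale frame.
cv::CascadeClassifier cascade;
NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_eye" ofType:@"xml"];
cascade.load([path UTF8String]);
std::vector<cv::Rect> eyes;
cascade.detectMultiScale(grayMat, eyes, 1.1, 2, 0, cv::Size(30, 30));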
Related
I am a newbie to the world of cocos2d. I am developing my first tutorial and facing one problem.
My problem is that I have an image (1024 x 320) and my orientation is landscape. I need to move that image continuously from right to left. For this purpose I have used the space shooter tutorial by Ray (thanks to him), but the image doesn't seem to be appearing again and again.
My code is:
-(id) init
{
if( (self=[super init])) {
CGSize screenSize = [CCDirector sharedDirector].winSize;
// 1) Create the CCParallaxNode
backgroundNode = [CCParallaxNode node];
[self addChild:backgroundNode z:-1];
// 2) Create the sprites we'll add to the CCParallaxNode
Back = [CCSprite spriteWithFile:@"bg_front_spacedust.png"];
//Back.position=ccp(screenSize.width/2, screenSize.height/2);
Back.rotation = -90;
Back1 = [CCSprite spriteWithFile:@"bg_front_spacedust.png"];
Back1.rotation = -90;
// 3) Determine relative movement speeds for space dust and background
CGPoint dustSpeed = ccp(0.1, 0.1);
// 4) Add children to CCParallaxNode
[backgroundNode addChild:Back z:0 parallaxRatio:dustSpeed positionOffset:ccp(screenSize.width/2, screenSize.height/2)];
NSLog(#"back.content width is...%f",Back.contentSize.width);
[backgroundNode addChild:Back1 z:1 parallaxRatio:dustSpeed positionOffset:ccp(screenSize.width/2, screenSize.height*2)];
// 5) Enable updates
[self scheduleUpdate];
}
return self;
}
- (void)update:(ccTime)dt {
// 1) Update background position
CGPoint backgroundScrollVel = ccp(0,-1000);
backgroundNode.position = ccpAdd(backgroundNode.position, ccpMult(backgroundScrollVel, dt));
// 2) Check for background elements moving offscreen
NSArray *spaceDusts = [NSArray arrayWithObjects:Back, Back1, nil];
for (CCSprite *spaceDust in spaceDusts) {
if ([backgroundNode convertToWorldSpace:spaceDust.position].x < -spaceDust.contentSize.width) {
[backgroundNode incrementOffset:ccp(2*spaceDust.contentSize.width,0) forChild:spaceDust];
}
}
}
Please help me out of this.
Thanks in advance.
Try this one:
if (backgroundNode.position.y <-screenSize.height*2)
backgroundNode.position = ccp(0,0);
Since the init method is called only once, your setup runs only one time; you need to reset the position of the backgroundNode to 0 in your update method. Here the multiple may vary.
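So the check can live at the end of the update: method; a sketch based on the code in the question:
- (void)update:(ccTime)dt {
    CGPoint backgroundScrollVel = ccp(0, -1000);
    backgroundNode.position = ccpAdd(backgroundNode.position, ccpMult(backgroundScrollVel, dt));
    // once the node has scrolled past two screen heights, snap it back;
    // the multiple may vary with your image size
    CGSize screenSize = [CCDirector sharedDirector].winSize;
    if (backgroundNode.position.y < -screenSize.height * 2) {
        backgroundNode.position = ccp(0, 0);
    }
}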
Try this code. It creates a parallax moving from bottom to top; change CGPointMake(0, 1.0) to CGPointMake(1.0, 0) in the paraNode addChild lines to make it horizontal.
-(id) init
{
// always call "super" init
// Apple recommends to re-assign "self" with the "super's" return value
if( (self=[super init]) )
{
float yPos =0.0;
NSMutableString *fileNameString = [[NSMutableString alloc]initWithCapacity:0];
if (IS_IPHONE_5)
{
[fileNameString appendString:#"Background-568h.png"];
yPos= 560.0;
}
else
{
[fileNameString appendString:#"Background.png"];
yPos= 470.0;
}
background1 = [CCSprite spriteWithFile:fileNameString];
background1.tag = 1;
background1.anchorPoint = CGPointMake(0,0);
background2 = [CCSprite spriteWithFile:fileNameString];
background2.tag = 2;
background2.anchorPoint = CGPointMake(0,0);
background3 = [CCSprite spriteWithFile:fileNameString];
background3.tag = 3;
background3.anchorPoint = CGPointMake(0,0);
background4 = [CCSprite spriteWithFile:fileNameString];
background4.tag = 4;
background4.anchorPoint = CGPointMake(0,0);
paraNode = [CCParallaxNode node];
[paraNode addChild:background1 z:1 parallaxRatio:CGPointMake(0, 1.0) positionOffset:CGPointMake(0, 0)];
[paraNode addChild:background2 z:2 parallaxRatio:CGPointMake(0, 1.0) positionOffset:CGPointMake(0, -yPos)];
[paraNode addChild:background3 z:3 parallaxRatio:CGPointMake(0, 1.0) positionOffset:CGPointMake(0, -yPos*2)];
[paraNode addChild:background4 z:4 parallaxRatio:CGPointMake(0, 1.0) positionOffset:CGPointMake(0, -yPos*3)];
[self addChild:paraNode z:-1 tag:123];
[self updateFrameRate:0.7 andYposition:yPos];
[fileNameString release];
fileNameString = nil;
}
return self;
}
-(void)updateFrameRate:(float)speedValue andYposition:(float)yposToSet
{
move1 = [CCMoveBy actionWithDuration:speedValue position:CGPointMake(0, yposToSet)];
move2 = [CCMoveBy actionWithDuration:0.0 position:CGPointMake(0, -yposToSet)];
move3 = [CCMoveBy actionWithDuration:0.0 position:CGPointMake(0, 0)];
CCSequence* sequence = [CCSequence actions:move1,move2,move3, nil];
CCRepeatForever* repeat = [CCRepeatForever actionWithAction:sequence];
[paraNode runAction:repeat];
}
I want to display a CALayer on video captured by AVCapture.
I am able to display the layer, but for the next frame the previous one should be removed.
My code is:
[CATransaction begin];
[CATransaction setValue:(id)kCFBooleanTrue forKey:kCATransactionDisableActions];
for (int i = 0; i < faces.size(); i++) {
CGRect faceRect;
// Get the Graphics Context
faceRect.origin.x = xyPoints.x;
faceRect.origin.y = xyPoints.y;
faceRect.size.width =50; //faces[i].width;
faceRect.size.height =50;// faces[i].height;
CALayer *featureLayer=nil;
// faceRect = CGRectApplyAffineTransform(faceRect, t);
if (!featureLayer) {
featureLayer = [[CALayer alloc]init];
featureLayer.borderColor = [[UIColor redColor] CGColor];
featureLayer.borderWidth = 10.0f;
[self.view.layer addSublayer:featureLayer];
}
featureLayer.frame = faceRect;
NSLog(#"frame-x - %f, frame-y - %f, frame-width - %f, frame-height - %f",featureLayer.frame.origin.x,featureLayer.frame.origin.y,featureLayer.frame.size.width,featureLayer.frame.size.height);
}
// [featureLayer removeFromSuperlayer];
[CATransaction commit];
where faces is a const std::vector<cv::Rect> (OpenCV format).
I need to know where to place the code [featureLayer removeFromSuperlayer];
Note: "face" is not for face detection... it is just a rectangle.
I have got the solution.
featureLayer is a CALayer object, and I gave it a name as its identity, like:
featureLayer.name = @"earLayer";
Whenever I detect the object in a frame, I get the sublayers from the main view like:
NSArray *sublayers = [NSArray arrayWithArray:[self.view.layer sublayers]];
and count the sublayers to check in a for loop like below:
int sublayersCount = [sublayers count];
int currentSublayer = 0;
for (CALayer *layer in sublayers) {
NSString *layerName = [layer name];
if ([layerName isEqualToString:@"earLayer"])
[layer setHidden:YES];
}
Now I am getting the proper layer with the detected objects.
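So instead of calling removeFromSuperlayer every frame, the flow can hide the stale layers first and then reuse or re-show them; a sketch combining the pieces above:
[CATransaction begin];
[CATransaction setValue:(id)kCFBooleanTrue forKey:kCATransactionDisableActions];
// hide every previously added ear layer before drawing the new frame
NSArray *sublayers = [NSArray arrayWithArray:[self.view.layer sublayers]];
for (CALayer *layer in sublayers) {
    if ([[layer name] isEqualToString:@"earLayer"])
        [layer setHidden:YES];
}
for (int i = 0; i < faces.size(); i++) {
    // ... compute faceRect, then un-hide a reusable @"earLayer" or create one ...
}
[CATransaction commit];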
I have a problem scaling a UIImageView that is placed inside a UIScrollView. I have googled and checked all the related questions on StackOverflow, and I tried all the answers posted there as well. Nothing worked for me.
First I place the UIImageView inside the UIScrollView in the nib file, take an image from the camera roll, and fill the image view. Then I use a UIRotationGestureRecognizer to rotate the image.
Here is the code I am using:
- (void)viewDidLoad
{
[super viewDidLoad];
NSLog(#"%#",[[UIDevice currentDevice] model]);
// Do any additional setup after loading the view, typically from a nib.
self.imagePicker = [[[UIImagePickerController alloc] init] autorelease];
self.picChosenImageView.layer.shouldRasterize = YES;
self.picChosenImageView.layer.rasterizationScale = [UIScreen mainScreen].scale;
self.picChosenImageView.layer.contents = (id)[UIImage imageNamed:@"test"].CGImage;
self.picChosenImageView.layer.shadowColor = [UIColor blackColor].CGColor;
self.picChosenImageView.layer.shadowOpacity = 0.8f;
self.picChosenImageView.layer.shadowRadius = 8;
self.picChosenImageView.layer.shadowPath = [UIBezierPath bezierPathWithRect:self.picChosenImageView.bounds].CGPath;
UIRotationGestureRecognizer *rotationRecognizer = [[[UIRotationGestureRecognizer alloc] initWithTarget:self
action:@selector(handleRotate:)] autorelease];
rotationRecognizer.delegate = self;
[self.picChosenImageView addGestureRecognizer:rotationRecognizer];
self.containerView.delegate = self;
self.containerView.contentSize = self.picChosenImageView.layer.frame.size;
self.containerView.maximumZoomScale = 4.0f;
self.containerView.minimumZoomScale = 1.0f;
angle = 0.0f;
useRotation = 0.0;
isRotationStarted=FALSE;
isZoomingStarted = FALSE;
}
-(void)lockZoom
{
maximumZoomScale = self.containerView.maximumZoomScale;
minimumZoomScale = self.containerView.minimumZoomScale;
self.containerView.maximumZoomScale = 1.0;
self.containerView.minimumZoomScale = 1.0;
self.containerView.clipsToBounds = false;
self.containerView.scrollEnabled = false;
}
-(void)unlockZoom
{
self.containerView.maximumZoomScale = maximumZoomScale;
self.containerView.minimumZoomScale = minimumZoomScale;
self.containerView.clipsToBounds = true;
self.containerView.scrollEnabled = true;
}
#pragma mark - ScrollView delegate methods
- (UIView *)viewForZoomingInScrollView:(UIScrollView *)scrollView
{
return self.picChosenImageView;
}
- (void)scrollViewDidZoom:(UIScrollView *)scrollView
{
CGRect frame = self.picChosenImageView.frame;
frame.origin = CGPointZero;
self.picChosenImageView.frame = frame;
//self.picChosenImageView.transform = prevTransform;
}
-(void) scrollViewWillBeginZooming:(UIScrollView *)scrollView withView:(UIView *)view
{
if(!isZoomingStarted)
{
self.picChosenImageView.transform = CGAffineTransformRotate(self.picChosenImageView.transform, angle);
NSLog(#"The zooming started");
isZoomingStarted = TRUE;
CGSize contentSize = self.containerView.bounds.size;
CGRect contentFrame = self.containerView.bounds;
NSLog(#"frame on start: %#", NSStringFromCGRect(contentFrame));
NSLog(#"size on start: %#", NSStringFromCGSize(contentSize));
//prevTransform = self.picChosenImageView.transform;
}
}
-(void) scrollViewDidEndZooming:(UIScrollView *)scrollView withView:(UIView *)view atScale:(float)scale
{
if(isZoomingStarted)
{
self.picChosenImageView.transform = CGAffineTransformRotate(self.picChosenImageView.transform, angle);
isZoomingStarted = FALSE;
CGSize contentSize = self.containerView.contentSize;
CGRect contentFrame = self.containerView.bounds;
NSLog(#"frame on end: %#", NSStringFromCGRect(contentFrame));
NSLog(#"size on end: %#", NSStringFromCGSize(contentSize));
}
}
#pragma mark - GestureRecognizer methods
- (void) handleRotate:(UIRotationGestureRecognizer *)recognizer
{
if(isZoomingStarted == FALSE)
{
if([recognizer state] == UIGestureRecognizerStateBegan)
{
angle = 0.0f;
[self lockZoom];
}
useRotation+= recognizer.rotation;
while( useRotation < -M_PI )
{
useRotation += M_PI*2;
}
while( useRotation > M_PI )
{
useRotation -= M_PI*2;
}
NSLog(#"The rotated value is %f",RADIANS_TO_DEGREES(useRotation));
self.picChosenImageView.transform = CGAffineTransformRotate([self.picChosenImageView transform],
recognizer.rotation);
[recognizer setRotation:0];
if([recognizer state] == UIGestureRecognizerStateEnded)
{
angle = useRotation;
useRotation = 0.0f;
isRotationStarted = FALSE;
self.containerView.hidden = NO;
//prevTransform = self.picChosenImageView.transform;
[self unlockZoom];
}
}
}
My problem is that I can successfully zoom in and out, and I can rotate the UIImageView as I want. But after rotating the image view to a certain angle, when I try to zoom in, the image view snaps back to its original position (rotates itself back to zero degrees) and only then does the zooming happen. I want to retain the rotation while zooming. I tried saving the previous transform and assigning it back in the scrollViewDidZoom and scrollViewWillBeginZooming delegate methods; neither worked. Please help me spot the mistake I am overlooking.
Try using CGAffineTransformScale instead of just resizing the frame for zooming:
anImage.transform = CGAffineTransformScale(anImage.transform, 2.0, 2.0);
Changing the transform for scaling might fix your rotation issue.
Hope this helps.
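For instance, a sketch that keeps the saved rotation while scaling, using the angle and containerView names from the question:
// Compose rotation and scale in one transform so the scroll view's zoom
// does not wipe out the rotation (angle comes from the rotation handler).
CGAffineTransform t = CGAffineTransformMakeRotation(angle);
t = CGAffineTransformScale(t, self.containerView.zoomScale, self.containerView.zoomScale);
self.picChosenImageView.transform = t;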
I had the same problem. UIScrollView takes control of the UIImageView and applies a transform without the rotation.
So I do not hand the image view to the scroll view for zooming, and I add a UIPinchGestureRecognizer for scaling instead.
func viewForZoomingInScrollView(scrollView: UIScrollView) -> UIView? {
return nil
}
Dragging is still working :)
// viewDidLoad
var pinchGestureRecognizer = UIPinchGestureRecognizer(target: self, action: #selector(pinchRecognized))
scrollView.addGestureRecognizer(pinchGestureRecognizer)
func pinchRecognized(sender: UIPinchGestureRecognizer) {
if sender.state == .Began || sender.state == .Changed {
let scale = sender.scale
imageView.transform = CGAffineTransformScale(imageView.transform, scale, scale)
sender.scale = 1
}
}
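The same idea in Objective-C, to match the rest of this question (a sketch; the imageView property name is assumed):
// Return nil so the scroll view never zooms the image view itself...
- (UIView *)viewForZoomingInScrollView:(UIScrollView *)scrollView
{
    return nil;
}
// ...and scale it with a pinch recognizer instead.
- (void)pinchRecognized:(UIPinchGestureRecognizer *)sender
{
    if (sender.state == UIGestureRecognizerStateBegan || sender.state == UIGestureRecognizerStateChanged) {
        self.imageView.transform = CGAffineTransformScale(self.imageView.transform, sender.scale, sender.scale);
        sender.scale = 1.0;
    }
}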
I am using a UIPopoverController to display properties of my ContentItems. All worked fine in iOS 4.3 using presentPopoverFromRect. Some of the ContentItem rects are fairly large, and in iOS 5 the popover resizes itself to fit in the margin between the rect's edges and the screen borders. In the worst cases the popover is less than 50 pixels tall and overlays the navigation bar. Obviously not user friendly.
I've re-written the code (latest attempt below) to detect the problem cases and give the popover a 1 pixel rect at the center point of the ContentItem. This works most of the time although I'm still reverse-engineering the behavior for some boundary cases.
However, I feel I must be missing something. During the iOS 5 beta there was some discussion of the need to specify resizing masks for the views the popover manages, but I find it hard to believe the behavior I'm seeing is Apple's fault and not mine. Advice is appreciated.
- (IBAction)handleDoubleTap:(UIGestureRecognizer *)sender
{
int subViewTag = sender.view.tag;
DLog(#":tag = %d", subViewTag);
NSString* key = [NSString stringWithFormat:#"%d", subViewTag];
ContentItemView* contentItemView = [self.contentItemViewDict objectForKey:key];
ContentItem* theContentItem = [contentItemView contentItem];
ContentItemPropertiesViewController* contentPopover = [[[ContentItemPropertiesViewController alloc] initWithNibName:@"ContentItemPropertiesViewController"
bundle:[NSBundle mainBundle]] autorelease];
contentPopover.delegate = self;
contentPopover.theItem = theContentItem;
contentPopover.theView = sender.view;
[contentPopover setContentSizeForViewInPopover:/*k_contentItemPopoverSize*/ CGSizeMake(320, 344.0f)];
UIPopoverController* popover = [[UIPopoverController alloc] initWithContentViewController:contentPopover];
popover.delegate = self;
self.contentPopoverViewController = popover;
[popover setPopoverContentSize:CGSizeMake(320, 344.0f)];
[popover release];
// Check to see if there's room for the popover around the edges of the object
CGRect popoverRect = [self rectForPopover:sender.view.frame];
if (popoverRect.origin.x == 0 && popoverRect.origin.y == 0 &&
popoverRect.size.width == 0 && popoverRect.size.height == 0) {
popoverRect = sender.view.bounds;
}
CGRect theRect = [sender.view convertRect:popoverRect
toView:self.view];
DLog(#"popoverRect %f, %f, %f, %f", theRect.origin.x, theRect.origin.y, theRect.size.width, theRect.size.height);
[self.contentPopoverViewController presentPopoverFromRect:theRect
inView:self.view
permittedArrowDirections:UIPopoverArrowDirectionAny
animated:YES];
}
- (CGRect)rectForPopover:(CGRect)viewRect
{
#define k_popoverPad 30.0f // a little extra room for the popover borders and arrow
DLog();
// Get the width and height the popover controller needs to fully display itself
CGSize popoverSize = k_contentItemPopoverSize;
CGFloat popoverWidth = popoverSize.width;
CGFloat popoverHeight = popoverSize.height;
// Get the edges of the object's rect translated to sceneView's coordinates
CGFloat leftEdge = self.sceneView.frame.origin.x + viewRect.origin.x - k_popoverPad;
CGFloat rightEdge = leftEdge + viewRect.size.width + k_popoverPad;
CGFloat topEdge = self.sceneView.frame.origin.y + viewRect.origin.y - k_popoverPad;
CGFloat bottomEdge = topEdge + viewRect.size.height + k_popoverPad;
// Get the bounds of the view
CGFloat viewRightBound = self.view.bounds.origin.x + self.view.bounds.size.width;
CGFloat viewBottomBound = self.view.bounds.origin.y + self.view.bounds.size.height;
// Compare the "margin" between the screen bounds and object's edges
// to see if the popover will fit somewhere
if (leftEdge > popoverWidth || // room on the left
topEdge > popoverHeight || // room at the top
viewRightBound - rightEdge > popoverWidth || // room to the right
viewBottomBound - bottomEdge > popoverHeight) { // room at the bottom
return CGRectZero;
} else {
// return a rect that is (essentially) the centerpoint of the object
CGRect newRect = CGRectMake((rightEdge - leftEdge) / 2.0f, (bottomEdge - topEdge) / 2.0f, 1.0f, 1.0f);
return newRect;
}
}
I'm using AVCaptureSession to capture video from my iPhone 3G, and I need to capture each image and change it before displaying it on my AVCaptureVideoPreviewLayer. In my current implementation I simply implement the captureOutput: method and display the UIImage in a UIImageView, but that does not work for some reason.
Any ideas? Subclass AVCaptureSession somehow?
Thanks!
Check out this sample: https://developer.apple.com/library/ios/#samplecode/SquareCam/Introduction/Intro.html
I think it does what you want; it draws face boxes onto faces.
- (void)drawFaceBoxesForFeatures:(NSArray *)features forVideoBox:(CGRect)clap orientation:(UIDeviceOrientation)orientation// videoMirrored:(const BOOL)ISVIDEOMIRRORED
{
NSArray *sublayers = [NSArray arrayWithArray:[previewLayer sublayers]];
NSInteger sublayersCount = [sublayers count], currentSublayer = 0;
NSInteger featuresCount = [features count], currentFeature = 0;
[CATransaction begin];
[CATransaction setValue:(id)kCFBooleanTrue forKey:kCATransactionDisableActions];
// hide all the face layers
for ( CALayer *layer in sublayers ) {
if ( [[layer name] isEqualToString:@"FaceLayer"] )
[layer setHidden:YES];
}
if ( featuresCount == 0 || !detectFaces ) {
[CATransaction commit];
return; // early bail.
}
CGSize parentFrameSize = [previewView frame].size;
NSString *gravity = [previewLayer videoGravity];
//BOOL isMirrored = isVideoMirrored;
#warning Deprecated
const BOOL isMirrored = [previewLayer isMirrored]; // deprecated
CGRect previewBox = [SquareCamViewController videoPreviewBoxForGravity:gravity
frameSize:parentFrameSize
apertureSize:clap.size];
for ( CIFaceFeature *ff in features ) {
// find the correct position for the square layer within the previewLayer
// the feature box originates in the bottom left of the video frame.
// (Bottom right if mirroring is turned on)
CGRect faceRect = [ff bounds];
// flip preview width and height
CGFloat temp = faceRect.size.width;
faceRect.size.width = faceRect.size.height;
faceRect.size.height = temp;
temp = faceRect.origin.x;
faceRect.origin.x = faceRect.origin.y;
faceRect.origin.y = temp;
// scale coordinates so they fit in the preview box, which may be scaled
CGFloat widthScaleBy = previewBox.size.width / clap.size.height;
CGFloat heightScaleBy = previewBox.size.height / clap.size.width;
faceRect.size.width *= widthScaleBy;
faceRect.size.height *= heightScaleBy;
faceRect.origin.x *= widthScaleBy;
faceRect.origin.y *= heightScaleBy;
if ( isMirrored )
faceRect = CGRectOffset(faceRect, previewBox.origin.x + previewBox.size.width - faceRect.size.width - (faceRect.origin.x * 2), previewBox.origin.y);
else
faceRect = CGRectOffset(faceRect, previewBox.origin.x, previewBox.origin.y);
CALayer *featureLayer = nil;
// re-use an existing layer if possible
while ( !featureLayer && (currentSublayer < sublayersCount) ) {
CALayer *currentLayer = [sublayers objectAtIndex:currentSublayer++];
if ( [[currentLayer name] isEqualToString:@"FaceLayer"] ) {
featureLayer = currentLayer;
[currentLayer setHidden:NO];
}
}
// create a new one if necessary
if ( !featureLayer ) {
featureLayer = [CALayer new];
[featureLayer setContents:(id)[square CGImage]];
[featureLayer setName:@"FaceLayer"];
[previewLayer addSublayer:featureLayer];
[featureLayer release];
}
[featureLayer setFrame:faceRect];
switch (orientation) {
case UIDeviceOrientationPortrait:
[featureLayer setAffineTransform:CGAffineTransformMakeRotation(DegreesToRadians(0.))];
break;
case UIDeviceOrientationPortraitUpsideDown:
[featureLayer setAffineTransform:CGAffineTransformMakeRotation(DegreesToRadians(180.))];
break;
case UIDeviceOrientationLandscapeLeft:
[featureLayer setAffineTransform:CGAffineTransformMakeRotation(DegreesToRadians(90.))];
break;
case UIDeviceOrientationLandscapeRight:
[featureLayer setAffineTransform:CGAffineTransformMakeRotation(DegreesToRadians(-90.))];
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
default:
break; // leave the layer in its last known orientation
}
currentFeature++;
}
[CATransaction commit];
}