
Using SCListener to Detect Microphone Audio Input

2012-08-11 


The .h file (SCListener.h):

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioQueue.h>
#import <AudioToolbox/AudioServices.h>

@interface SCListener : NSObject {
    AudioQueueLevelMeterState *levels;

    AudioQueueRef queue;
    AudioStreamBasicDescription format;
    Float64 sampleRate;
}

+ (SCListener *)sharedListener;

- (void)listen;
- (BOOL)isListening;
- (void)pause;
- (void)stop;

- (Float32)averagePower;
- (Float32)peakPower;
- (AudioQueueLevelMeterState *)levels;

@end


The .m file (SCListener.m):

#import "SCListener.h"

@interface SCListener (Private)

- (void)updateLevels;
- (void)setupQueue;
- (void)setupFormat;
- (void)setupBuffers;
- (void)setupMetering;

@end

static SCListener *sharedListener = nil;

static void listeningCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer,
                              const AudioTimeStamp *inStartTime, UInt32 inNumberPacketsDescriptions,
                              const AudioStreamPacketDescription *inPacketDescs) {
    SCListener *listener = (SCListener *)inUserData;
    if ([listener isListening])
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}


@implementation SCListener

+ (SCListener *)sharedListener {
    @synchronized(self) {
        if (sharedListener == nil)
            [[self alloc] init];
    }

    return sharedListener;
}

- (void)dealloc {
    [sharedListener stop];
    [super dealloc];
}

#pragma mark -
#pragma mark Listening

- (void)listen {
    if (queue == nil)
        [self setupQueue];

    AudioQueueStart(queue, NULL);
}

- (void)pause {
    if (![self isListening])
        return;

    AudioQueueStop(queue, true);
}

- (void)stop {
    if (queue == nil)
        return;

    AudioQueueDispose(queue, true);
    queue = nil;
}


- (BOOL)isListening {
    if (queue == nil)
        return NO;

    UInt32 isListening, ioDataSize = sizeof(UInt32);
    OSStatus result = AudioQueueGetProperty(queue, kAudioQueueProperty_IsRunning, &isListening, &ioDataSize);
    return (result != noErr) ? NO : isListening;
}

#pragma mark -
#pragma mark Levels getters

- (Float32)averagePower {
    if (![self isListening])
        return 0.0;

    return [self levels][0].mAveragePower;
}

- (Float32)peakPower {
    if (![self isListening])
        return 0.0;

    return [self levels][0].mPeakPower;
}

- (AudioQueueLevelMeterState *)levels {
    if (![self isListening])
        return nil;

    [self updateLevels];
    return levels;
}

- (void)updateLevels {
    UInt32 ioDataSize = format.mChannelsPerFrame * sizeof(AudioQueueLevelMeterState);
    AudioQueueGetProperty(queue, (AudioQueuePropertyID)kAudioQueueProperty_CurrentLevelMeter, levels, &ioDataSize);
}


#pragma mark -
#pragma mark Setup

- (void)setupQueue {
    if (queue)
        return;

    [self setupFormat];
    [self setupBuffers];
    AudioQueueNewInput(&format, listeningCallback, self, NULL, NULL, 0, &queue);
    [self setupMetering];
}

- (void)setupFormat {
#if TARGET_IPHONE_SIMULATOR
    format.mSampleRate = 44100.0;
#else
    UInt32 ioDataSize = sizeof(sampleRate);
    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &ioDataSize, &sampleRate);
    format.mSampleRate = sampleRate;
#endif
    // 16-bit, single-channel (mono) linear PCM
    format.mFormatID = kAudioFormatLinearPCM;
    format.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    format.mFramesPerPacket = format.mChannelsPerFrame = 1;
    format.mBitsPerChannel = 16;
    format.mBytesPerPacket = format.mBytesPerFrame = 2;
}

- (void)setupBuffers {
    // Three small buffers; the callback re-enqueues each one after it fills.
    AudioQueueBufferRef buffers[3];
    for (NSInteger i = 0; i < 3; ++i) {
        AudioQueueAllocateBuffer(queue, 735, &buffers[i]);
        AudioQueueEnqueueBuffer(queue, buffers[i], 0, NULL);
    }
}

- (void)setupMetering {
    levels = (AudioQueueLevelMeterState *)calloc(sizeof(AudioQueueLevelMeterState), format.mChannelsPerFrame);
    UInt32 trueValue = true;
    AudioQueueSetProperty(queue, kAudioQueueProperty_EnableLevelMetering, &trueValue, sizeof(UInt32));
}


#pragma mark -
#pragma mark Singleton Pattern

+ (id)allocWithZone:(NSZone *)zone {
    @synchronized(self) {
        if (sharedListener == nil) {
            sharedListener = [super allocWithZone:zone];
            return sharedListener;
        }
    }

    return nil;
}

- (id)copyWithZone:(NSZone *)zone {
    return self;
}

- (id)init {
    if ([super init] == nil)
        return nil;

    return self;
}

- (id)retain {
    return self;
}

- (unsigned)retainCount {
    return UINT_MAX;
}

- (void)release {
    // Do nothing.
}

- (id)autorelease {
    return self;
}

@end
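One practical note before the usage examples: on a real device the input queue only receives microphone data if the app's audio session permits recording, so the session is typically configured before the first call to -listen. A minimal sketch of that setup, assuming AVFoundation is linked; the helper name and the PlayAndRecord category choice are illustrative assumptions, not part of SCListener:

#import <AVFoundation/AVFoundation.h>

// Hypothetical helper: configure and activate an audio session that allows
// recording before calling [[SCListener sharedListener] listen].
static void PrepareAudioSessionForListening(void) {
    NSError *error = nil;
    AVAudioSession *session = [AVAudioSession sharedInstance];
    if (![session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error])
        NSLog(@"Could not set audio session category: %@", error);
    if (![session setActive:YES error:&error])
        NSLog(@"Could not activate audio session: %@", error);
}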



Usage


- (void)beginListen
{
    // Start listening
    [[SCListener sharedListener] listen];

    // Get the peak level
    [[SCListener sharedListener] peakPower];

    // Get the average level
    [[SCListener sharedListener] averagePower];

    // (listener is assumed to be an ivar of the hosting class)
    listener = [SCListener sharedListener];

    // Pause
    // We can temporarily stop returning levels.
    [listener pause];
    [listener listen]; // Quick.

    // Stop
    // Or free up resources when we're not listening for a while.
    [listener stop];
    [listener listen]; // Slower.
}
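Because -levels (and therefore -peakPower and -averagePower) only returns meaningful values while the queue is running, a common pattern is to poll the listener on a timer rather than reading it once right after -listen. A small sketch of that idea, reusing the -getPower method shown below; the meterTimer property, the 0.1 s interval, and the method names are illustrative assumptions:

// Hypothetical polling setup: sample the levels ten times per second while listening.
- (void)startMetering
{
    [[SCListener sharedListener] listen];
    self.meterTimer = [NSTimer scheduledTimerWithTimeInterval:0.1
                                                       target:self
                                                     selector:@selector(getPower)
                                                     userInfo:nil
                                                      repeats:YES];
}

- (void)stopMetering
{
    [self.meterTimer invalidate];
    self.meterTimer = nil;
    [[SCListener sharedListener] stop];
}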


- (void)getPower
{
    // Check first: -levels returns nil when the queue is not running,
    // and dereferencing it would crash.
    if (![listener isListening])
        return;

    AudioQueueLevelMeterState *levels = [listener levels];

    NSLog(@"levels[0].mPeakPower %@", [NSString stringWithFormat:@"%f", levels[0].mPeakPower]);

    // Peak level
    Float32 peak = levels[0].mPeakPower;
    // Average level
    Float32 average = levels[0].mAveragePower;

    NSLog(@"peak %f", peak);
}
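To actually detect that sound reached the microphone, the metered level is usually compared against a threshold. A minimal sketch of that check; the threshold value is an arbitrary assumption that would need tuning per device and environment (the linear peak values reported by the level meter fall roughly between 0 and 1):

// Hypothetical detection check: treat a peak above the threshold as "sound detected".
- (void)checkForSound
{
    SCListener *shared = [SCListener sharedListener];
    if (![shared isListening])
        return;

    const Float32 kPeakThreshold = 0.8f; // arbitrary threshold; tune as needed
    if ([shared peakPower] > kPeakThreshold)
        NSLog(@"Microphone input detected, peak %f", [shared peakPower]);
}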




