- Find and open the default audio input device.
- Create a capture session.
- Add a device input for the audio device to the session.
- Add a decompressed audio output to the session and make this object its delegate.
- Create a delay effect audio unit and install a render callback that feeds it the captured audio.
- Start the capture session running.

The code below implements these steps:
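For context, here is a minimal sketch of the controller's declarations that the listing assumes. The instance-variable and accessor names are inferred from the code below rather than taken from the original project, and the class name AudioRecorderController is a placeholder; the implementation would @synthesize outputFile and recording.

#import <Cocoa/Cocoa.h>
#import <QTKit/QTKit.h>
#import <AudioUnit/AudioUnit.h>
#import <AudioToolbox/AudioToolbox.h>

// Placeholder class name; the original project's name may differ.
@interface AudioRecorderController : NSObject
{
    IBOutlet NSWindow *window;

    QTCaptureSession *captureSession;
    QTCaptureDeviceInput *captureAudioDeviceInput;
    QTCaptureDecompressedAudioOutput *captureAudioDataOutput;

    AudioUnit effectAudioUnit;                      // delay effect unit
    AudioStreamBasicDescription currentInputASBD;   // format of the captured audio
    AudioBufferList *currentInputAudioBufferList;   // valid only during the capture callback
    Float64 currentSampleTime;                      // running time stamp for the effect unit
    BOOL didSetUpAudioUnits;

    ExtAudioFileRef extAudioFile;                   // AIFF file being recorded, if any
    NSString *outputFile;
    BOOL recording;
}

// Accessor used by the audio unit render callback.
- (AudioBufferList *)currentInputAudioBufferList;

@property (copy) NSString *outputFile;
@property (assign, getter=isRecording) BOOL recording;

@end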
- (id)init
{
    self = [super init];
    if (self) {
        // Record to an AIFF file on the user's desktop by default.
        [self setOutputFile:[@"~/Desktop/Audio Recording.aif" stringByStandardizingPath]];
    }
    return self;
}
- (void)awakeFromNib
{
    BOOL success;
    NSError *error;

    // Find the default audio input device and open it.
    QTCaptureDevice *audioDevice = [QTCaptureDevice defaultInputDeviceWithMediaType:QTMediaTypeSound];
    success = [audioDevice open:&error];
    if (!success) {
        [[NSAlert alertWithError:error] runModal];
        return;
    }

    // Create the capture session and add a device input for the audio device.
    captureSession = [[QTCaptureSession alloc] init];
    captureAudioDeviceInput = [[QTCaptureDeviceInput alloc] initWithDevice:audioDevice];
    success = [captureSession addInput:captureAudioDeviceInput error:&error];
    if (!success) {
        [captureAudioDeviceInput release];
        captureAudioDeviceInput = nil;
        [audioDevice close];
        [captureSession release];
        captureSession = nil;
        [[NSAlert alertWithError:error] runModal];
        return;
    }

    // Add a decompressed audio output that delivers raw sample buffers to this object.
    captureAudioDataOutput = [[QTCaptureDecompressedAudioOutput alloc] init];
    [captureAudioDataOutput setDelegate:self];
    success = [captureSession addOutput:captureAudioDataOutput error:&error];
    if (!success) {
        [captureAudioDeviceInput release];
        captureAudioDeviceInput = nil;
        [audioDevice close];
        [captureAudioDataOutput release];
        captureAudioDataOutput = nil;
        [captureSession release];
        captureSession = nil;
        [[NSAlert alertWithError:error] runModal];
        return;
    }
    // Find and instantiate Apple's delay effect audio unit.
    OSStatus err = noErr;
    AudioComponentDescription effectAudioUnitComponentDescription;
    effectAudioUnitComponentDescription.componentType = kAudioUnitType_Effect;
    effectAudioUnitComponentDescription.componentSubType = kAudioUnitSubType_Delay;
    effectAudioUnitComponentDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    effectAudioUnitComponentDescription.componentFlags = 0;
    effectAudioUnitComponentDescription.componentFlagsMask = 0;
    AudioComponent effectAudioUnitComponent = AudioComponentFindNext(NULL, &effectAudioUnitComponentDescription);
    err = AudioComponentInstanceNew(effectAudioUnitComponent, &effectAudioUnit);

    if (noErr == err) {
        // Install a render callback so the effect unit pulls its input from the
        // most recently captured sample buffer.
        AURenderCallbackStruct renderCallbackStruct;
        renderCallbackStruct.inputProc = PushCurrentInputBufferIntoAudioUnit;
        renderCallbackStruct.inputProcRefCon = self;
        err = AudioUnitSetProperty(effectAudioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &renderCallbackStruct, sizeof(renderCallbackStruct));
    }

    if (noErr != err) {
        if (effectAudioUnit) {
            AudioComponentInstanceDispose(effectAudioUnit);
            effectAudioUnit = NULL;
        }
        [captureAudioDeviceInput release];
        captureAudioDeviceInput = nil;
        [audioDevice close];
        [captureAudioDataOutput release];
        captureAudioDataOutput = nil;
        [captureSession release];
        captureSession = nil;
        [[NSAlert alertWithError:[NSError errorWithDomain:NSOSStatusErrorDomain code:err userInfo:nil]] runModal];
        return;
    }

    [captureSession startRunning];
    [window setDelegate:self];
}
- (void)windowWillClose:(NSNotification *)notification
{
    [self setRecording:NO];

    [captureSession stopRunning];
    QTCaptureDevice *audioDevice = [captureAudioDeviceInput device];
    if ([audioDevice isOpen])
        [audioDevice close];
}

- (void)dealloc
{
    [captureSession release];
    [captureAudioDeviceInput release];
    [captureAudioDataOutput release];
    [outputFile release];

    if (extAudioFile)
        ExtAudioFileDispose(extAudioFile);
    if (effectAudioUnit) {
        if (didSetUpAudioUnits)
            AudioUnitUninitialize(effectAudioUnit);
        AudioComponentInstanceDispose(effectAudioUnit);
    }

    [super dealloc];
}
#pragma mark ======== Audio capture methods =========
- (void)captureOutput:(QTCaptureOutput *)captureOutput didOutputAudioSampleBuffer:(QTSampleBuffer *)sampleBuffer fromConnection:(QTCaptureConnection *)connection
{
    OSStatus err = noErr;
    BOOL isRecording = [self isRecording];

    // Get the stream description for the captured audio.
    QTFormatDescription *formatDescription = [sampleBuffer formatDescription];
    NSValue *sampleBufferASBDValue = [formatDescription attributeForKey:QTFormatDescriptionAudioStreamBasicDescriptionAttribute];
    if (!sampleBufferASBDValue)
        return;
    AudioStreamBasicDescription sampleBufferASBD = {0};
    [sampleBufferASBDValue getValue:&sampleBufferASBD];

    // If the format has changed (or this is the first buffer), reconfigure the
    // effect unit's input and output stream formats to match the captured audio.
    if ((sampleBufferASBD.mChannelsPerFrame != currentInputASBD.mChannelsPerFrame) || (sampleBufferASBD.mSampleRate != currentInputASBD.mSampleRate)) {
        currentInputASBD = sampleBufferASBD;

        if (didSetUpAudioUnits) {
            // The unit was configured for the old format; tear it down first.
            AudioUnitUninitialize(effectAudioUnit);
            if (extAudioFile) {
                ExtAudioFileDispose(extAudioFile);
                extAudioFile = NULL;
            }
        } else {
            didSetUpAudioUnits = YES;
        }

        err = AudioUnitSetProperty(effectAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &currentInputASBD, sizeof(currentInputASBD));
        if (noErr == err)
            err = AudioUnitSetProperty(effectAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &currentInputASBD, sizeof(currentInputASBD));
        if (noErr == err)
            err = AudioUnitInitialize(effectAudioUnit);

        if (noErr != err) {
            NSLog(@"Failed to set up audio units (%d)", err);
            didSetUpAudioUnits = NO;
            bzero(&currentInputASBD, sizeof(currentInputASBD));
        }
    }

    if (isRecording && !extAudioFile) {
        // Recording just started: create an AIFF file of 16-bit big-endian signed
        // integer PCM at the captured sample rate and channel count.
        AudioStreamBasicDescription recordedASBD = {0};
        recordedASBD.mSampleRate = currentInputASBD.mSampleRate;
        recordedASBD.mFormatID = kAudioFormatLinearPCM;
        recordedASBD.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        recordedASBD.mBytesPerPacket = 2 * currentInputASBD.mChannelsPerFrame;
        recordedASBD.mFramesPerPacket = 1;
        recordedASBD.mBytesPerFrame = 2 * currentInputASBD.mChannelsPerFrame;
        recordedASBD.mChannelsPerFrame = currentInputASBD.mChannelsPerFrame;
        recordedASBD.mBitsPerChannel = 16;

        NSData *inputChannelLayoutData = [formatDescription attributeForKey:QTFormatDescriptionAudioChannelLayoutAttribute];
        AudioChannelLayout *recordedChannelLayout = (AudioChannelLayout *)[inputChannelLayoutData bytes];

        err = ExtAudioFileCreateWithURL((CFURLRef)[NSURL fileURLWithPath:[self outputFile]], kAudioFileAIFFType, &recordedASBD, recordedChannelLayout, kAudioFileFlags_EraseFile, &extAudioFile);
        if (noErr == err) {
            // The client format is the captured format; ExtAudioFile converts to
            // the file format on write.
            err = ExtAudioFileSetProperty(extAudioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(currentInputASBD), &currentInputASBD);
        }

        if (noErr != err) {
            NSLog(@"Failed to set up ExtAudioFile (%d)", err);
            ExtAudioFileDispose(extAudioFile);
            extAudioFile = NULL;
        }
    } else if (!isRecording && extAudioFile) {
        // Recording just stopped: close the file.
        ExtAudioFileDispose(extAudioFile);
        extAudioFile = NULL;
    }

    NSUInteger numberOfFrames = [sampleBuffer numberOfSamples];

    // The effect unit needs a continuously increasing time stamp for each render.
    currentSampleTime += (double)numberOfFrames;
    AudioTimeStamp timeStamp = {0};
    timeStamp.mSampleTime = currentSampleTime;
    timeStamp.mFlags |= kAudioTimeStampSampleTimeValid;
    AudioUnitRenderActionFlags flags = 0;

    // Allocate a non-interleaved output buffer list, one buffer per channel.
    AudioBufferList *outputABL = calloc(1, sizeof(*outputABL) + (currentInputASBD.mChannelsPerFrame - 1) * sizeof(outputABL->mBuffers[0]));
    outputABL->mNumberBuffers = currentInputASBD.mChannelsPerFrame;
    UInt32 channelIndex;
    for (channelIndex = 0; channelIndex < currentInputASBD.mChannelsPerFrame; channelIndex++) {
        UInt32 dataSize = numberOfFrames * currentInputASBD.mBytesPerFrame;
        outputABL->mBuffers[channelIndex].mDataByteSize = dataSize;
        outputABL->mBuffers[channelIndex].mData = malloc(dataSize);
        outputABL->mBuffers[channelIndex].mNumberChannels = 1;
    }

    // Expose the captured buffer to the render callback, render the effect, then
    // clear the pointer (the buffer is only valid during this method).
    currentInputAudioBufferList = [sampleBuffer audioBufferListWithOptions:QTSampleBufferAudioBufferListOptionAssure16ByteAlignment];
    err = AudioUnitRender(effectAudioUnit, &flags, &timeStamp, 0, numberOfFrames, outputABL);
    currentInputAudioBufferList = NULL;

    // Write the processed audio to the file if recording.
    if ((noErr == err) && extAudioFile) {
        err = ExtAudioFileWriteAsync(extAudioFile, numberOfFrames, outputABL);
    }

    for (channelIndex = 0; channelIndex < currentInputASBD.mChannelsPerFrame; channelIndex++) {
        free(outputABL->mBuffers[channelIndex].mData);
    }
    free(outputABL);
}
- (AudioBufferList *)currentInputAudioBufferList
{
    return currentInputAudioBufferList;
}
Note the #pragma mark directive, which sets off the audio capture methods within the source file.
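The listing also references a render callback, PushCurrentInputBufferIntoAudioUnit, whose implementation is not shown above. Here is a minimal sketch of what that callback must do, assuming the placeholder class name from the interface sketch: when the effect unit pulls for input during AudioUnitRender, the callback hands it the buffer list that the capture delegate method stashed in currentInputAudioBufferList.

// Render callback installed on the effect audio unit. A sketch; the original
// sample's version may differ in details such as the error code returned.
static OSStatus PushCurrentInputBufferIntoAudioUnit(void                       *inRefCon,
                                                    AudioUnitRenderActionFlags *ioActionFlags,
                                                    const AudioTimeStamp       *inTimeStamp,
                                                    UInt32                     inBusNumber,
                                                    UInt32                     inNumberFrames,
                                                    AudioBufferList            *ioData)
{
    AudioRecorderController *controller = (AudioRecorderController *)inRefCon;
    AudioBufferList *inputAudioBufferList = [controller currentInputAudioBufferList];
    UInt32 bufferIndex;

    // The captured buffer list is only set while the capture delegate method is
    // executing; fail if the unit pulls at any other time.
    if (!inputAudioBufferList)
        return kAudioUnitErr_NoConnection;

    // Point the unit's buffers at the captured data; the unit consumes the data
    // before AudioUnitRender returns, so no copy is needed.
    for (bufferIndex = 0; bufferIndex < ioData->mNumberBuffers && bufferIndex < inputAudioBufferList->mNumberBuffers; bufferIndex++) {
        ioData->mBuffers[bufferIndex].mNumberChannels = inputAudioBufferList->mBuffers[bufferIndex].mNumberChannels;
        ioData->mBuffers[bufferIndex].mDataByteSize = inputAudioBufferList->mBuffers[bufferIndex].mDataByteSize;
        ioData->mBuffers[bufferIndex].mData = inputAudioBufferList->mBuffers[bufferIndex].mData;
    }

    return noErr;
}

Because AudioUnitRender is invoked synchronously from the capture delegate method, the stashed buffer list remains valid for the duration of the callback, which is what makes handing over the mData pointers without copying safe.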