I am trying to create my own Audio Unit sound effects based on microphone input. The application routes the microphone input to the speaker in real time so I can apply effects as the audio passes through. Everything works when I run it in the simulator, but when I test on an actual iPhone I don't hear anything. Here is my code, in case anyone can help:
- (id)init {
    self = [super init];

    OSStatus status;

    // Describe the RemoteIO audio component.
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Find the component and create the audio unit.
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);
    checkStatus(status);

    // Enable IO for recording.
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    checkStatus(status);

    // Enable IO for playback.
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output,
                                  kOutputBus,
                                  &flag,
                                  sizeof(flag));
    checkStatus(status);

    // Describe the stream format: 44.1 kHz, mono, 16-bit signed integer PCM.
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate = 44100.00;
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket = 1;
    audioFormat.mChannelsPerFrame = 1;
    audioFormat.mBitsPerChannel = 16;
    audioFormat.mBytesPerPacket = 2;
    audioFormat.mBytesPerFrame = 2;

    // Apply the format to the output side of the input bus
    // and the input side of the output bus.
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    checkStatus(status);
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  kOutputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    checkStatus(status);

    // Set the input callback (fires when microphone data is available).
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  kInputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Set the output (render) callback.
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Global,
                                  kOutputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Allocate our own buffer (1 channel, 16 bits per sample, so 2 bytes per frame).
    // In practice the buffers contain 512 frames; if this changes, it is
    // handled in processAudio.
    tempBuffer.mNumberChannels = 1;
    tempBuffer.mDataByteSize = 512 * 2;
    tempBuffer.mData = malloc(512 * 2);

    // Initialise the audio unit.
    status = AudioUnitInitialize(audioUnit);
    checkStatus(status);

    return self;
}
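One thing I am not doing anywhere is configuring the audio session. From what I have read, the simulator records without it, but a real device will not deliver microphone input unless a play-and-record session is active, so this may be related to my problem. Below is a minimal sketch of the setup I understand is needed before the unit runs (this uses the standard AVAudioSession API; the exact placement and the error handling are my own guesses):

#import <AVFoundation/AVFoundation.h>

// Sketch: put the app into play-and-record mode so the device opens
// both the microphone and the speaker. Without an active session the
// input callback may never fire on real hardware.
NSError *sessionError = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&sessionError];
[session setActive:YES error:&sessionError];
if (sessionError != nil) {
    NSLog(@"Audio session error: %@", sessionError);
}

(The unit itself is started later with AudioOutputUnitStart(audioUnit); that part at least works in the simulator.)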
This callback is called whenever new microphone audio data is available, but execution never reaches it when I test on the iPhone:
static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
    // Allocate a buffer for the incoming samples
    // (mono, 16-bit, so 2 bytes per frame).
    AudioBuffer buffer;
    buffer.mNumberChannels = 1;
    buffer.mDataByteSize = inNumberFrames * 2;
    buffer.mData = malloc(inNumberFrames * 2);
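    // (Sketch of the remainder: wrap `buffer` in a one-entry
    // AudioBufferList, pull the captured microphone samples into it
    // with AudioUnitRender() on the input bus, hand the list to
    // processAudio where the effect is applied, then free the buffer
    // and return noErr.)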