Has anyone successfully done offline rendering with Core Audio?
I have to mix two audio files and apply reverb (using two AudioFilePlayer units, a MultiChannelMixer, Reverb2, and RemoteIO).
I got it working, and I can save the output while it is previewing (in the render callback of the RemoteIO).
I need to save it without playing it back (offline).
Thanks in advance.
Solution
Offline rendering worked for me using the GenericOutput AudioUnit.
I am sharing the working code here.
The Core Audio framework can seem a little daunting, but small things like the ASBD, parameters, and so on are what cause most of these problems. Keep at it and it will work; don't give up :-). Core Audio is very powerful and useful for dealing with low-level audio. This is what I have learned over the last few weeks. Enjoy :-D ....
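Every snippet below goes through a CheckError helper that the answer doesn't show. Here is a minimal sketch of it, following the common convention of printing the OSStatus as a four-character code and aborting; treat the exact behavior as an assumption:

#import <AudioToolbox/AudioToolbox.h>
#include <ctype.h>

// Minimal error-check helper: prints the OSStatus as a four-char code
// (or a plain integer) and exits on failure.
static void CheckError(OSStatus error, const char *operation)
{
    if (error == noErr) return;
    char errorString[20];
    // See if the status appears to be a four-character code.
    *(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig(error);
    if (isprint(errorString[1]) && isprint(errorString[2]) &&
        isprint(errorString[3]) && isprint(errorString[4])) {
        errorString[0] = errorString[5] = '\'';
        errorString[6] = '\0';
    } else {
        sprintf(errorString, "%d", (int)error);
    }
    fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
    exit(1);
}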
Declare these in the .h:
#import <AudioToolbox/AudioToolbox.h>
#import <AudioUnit/AudioUnit.h>

// AUGraph
AUGraph mGraph;
// Audio Unit references
AudioUnit mFilePlayer;
AudioUnit mFilePlayer2;
AudioUnit mReverb;
AudioUnit mTone;
AudioUnit mMixer;
AudioUnit mGIO;
// Audio file references
AudioFileID inputFile;
AudioFileID inputFile2;
// Audio file reference for saving
ExtAudioFileRef extAudioFile;
// Standard sample rate
Float64 graphSampleRate;
AudioStreamBasicDescription stereoStreamFormat864;
Float64 MaxSampleTime;
// In the .m class:
- (id)init
{
    self = [super init];
    graphSampleRate = 44100.0;
    MaxSampleTime = 0.0;
    UInt32 category = kAudioSessionCategory_MediaPlayback;
    CheckError(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                                       sizeof(category), &category),
               "Couldn't set category on audio session");
    [self initializeAUGraph];
    return self;
}
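A note on the session setup: the C-based AudioSession API used above was deprecated in iOS 7. On a newer SDK, the AVAudioSession equivalent would be something like this (a sketch, assuming you link AVFoundation):

#import <AVFoundation/AVFoundation.h>

// Equivalent session setup with the modern API.
NSError *sessionError = nil;
if (![[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayback
                                            error:&sessionError]) {
    NSLog(@"Couldn't set category: %@", sessionError);
}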
// ASBD setup
- (void)setupStereoStream864
{
    // The AudioUnitSampleType data type is the recommended type for sample data
    // in audio units. This obtains the byte size of the type for use in filling
    // in the ASBD.
    size_t bytesPerSample = sizeof(AudioUnitSampleType);
    // Fill the application audio format struct's fields to define a linear PCM,
    // stereo, noninterleaved stream at the hardware sample rate.
    stereoStreamFormat864.mFormatID         = kAudioFormatLinearPCM;
    stereoStreamFormat864.mFormatFlags      = kAudioFormatFlagsAudioUnitCanonical;
    stereoStreamFormat864.mBytesPerPacket   = bytesPerSample;
    stereoStreamFormat864.mFramesPerPacket  = 1;
    stereoStreamFormat864.mBytesPerFrame    = bytesPerSample;
    stereoStreamFormat864.mChannelsPerFrame = 2; // 2 indicates stereo
    stereoStreamFormat864.mBitsPerChannel   = 8 * bytesPerSample;
    stereoStreamFormat864.mSampleRate       = graphSampleRate;
}
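Note that AudioUnitSampleType and kAudioFormatFlagsAudioUnitCanonical describe the older 8.24 fixed-point canonical format and were deprecated in later SDKs, where Float32 is canonical. A float-based equivalent would look like this (a sketch, assuming a deployment target where float is the canonical sample type):

// Float32, stereo, noninterleaved: the canonical AU format on newer SDKs.
AudioStreamBasicDescription floatFormat = {0};
floatFormat.mFormatID         = kAudioFormatLinearPCM;
floatFormat.mFormatFlags      = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
floatFormat.mBytesPerPacket   = sizeof(Float32);
floatFormat.mFramesPerPacket  = 1;
floatFormat.mBytesPerFrame    = sizeof(Float32);
floatFormat.mChannelsPerFrame = 2;
floatFormat.mBitsPerChannel   = 8 * sizeof(Float32);
floatFormat.mSampleRate       = graphSampleRate;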
// AUGraph setup
- (void)initializeAUGraph
{
    [self setupStereoStream864];

    // Set up the AUGraph, add AUNodes, and make connections.
    // Create a new AUGraph.
    CheckError(NewAUGraph(&mGraph), "Couldn't create new graph");

    // AUNodes represent AudioUnits on the AUGraph and provide an
    // easy means for connecting audio units together.
    AUNode filePlayerNode;
    AUNode filePlayerNode2;
    AUNode mixerNode;
    AUNode reverbNode;
    AUNode toneNode;
    AUNode gOutputNode;

    // Create AudioComponentDescriptions for the AUs we want in the graph.
    // File player component
    AudioComponentDescription filePlayer_desc;
    filePlayer_desc.componentType = kAudioUnitType_Generator;
    filePlayer_desc.componentSubType = kAudioUnitSubType_AudioFilePlayer;
    filePlayer_desc.componentFlags = 0;
    filePlayer_desc.componentFlagsMask = 0;
    filePlayer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // File player component 2
    AudioComponentDescription filePlayer2_desc;
    filePlayer2_desc.componentType = kAudioUnitType_Generator;
    filePlayer2_desc.componentSubType = kAudioUnitSubType_AudioFilePlayer;
    filePlayer2_desc.componentFlags = 0;
    filePlayer2_desc.componentFlagsMask = 0;
    filePlayer2_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Mixer component
    AudioComponentDescription mixer_desc;
    mixer_desc.componentType = kAudioUnitType_Mixer;
    mixer_desc.componentSubType = kAudioUnitSubType_MultiChannelMixer;
    mixer_desc.componentFlags = 0;
    mixer_desc.componentFlagsMask = 0;
    mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Reverb component
    AudioComponentDescription reverb_desc;
    reverb_desc.componentType = kAudioUnitType_Effect;
    reverb_desc.componentSubType = kAudioUnitSubType_Reverb2;
    reverb_desc.componentFlags = 0;
    reverb_desc.componentFlagsMask = 0;
    reverb_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Tone component
    AudioComponentDescription tone_desc;
    tone_desc.componentType = kAudioUnitType_FormatConverter;
    //tone_desc.componentSubType = kAudioUnitSubType_NewTimePitch;
    tone_desc.componentSubType = kAudioUnitSubType_Varispeed;
    tone_desc.componentFlags = 0;
    tone_desc.componentFlagsMask = 0;
    tone_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Generic output component
    AudioComponentDescription gOutput_desc;
    gOutput_desc.componentType = kAudioUnitType_Output;
    gOutput_desc.componentSubType = kAudioUnitSubType_GenericOutput;
    gOutput_desc.componentFlags = 0;
    gOutput_desc.componentFlagsMask = 0;
    gOutput_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Add nodes to the graph to hold our AudioUnits.
    // You pass in a reference to the AudioComponentDescription
    // and get back an AUNode.
    AUGraphAddNode(mGraph, &filePlayer_desc, &filePlayerNode);
    AUGraphAddNode(mGraph, &filePlayer2_desc, &filePlayerNode2);
    AUGraphAddNode(mGraph, &mixer_desc, &mixerNode);
    AUGraphAddNode(mGraph, &reverb_desc, &reverbNode);
    AUGraphAddNode(mGraph, &tone_desc, &toneNode);
    AUGraphAddNode(mGraph, &gOutput_desc, &gOutputNode);

    // Open the graph early, initialize late.
    // The AudioUnits are open but not initialized (no resource allocation occurs here).
    CheckError(AUGraphOpen(mGraph), "Couldn't open the graph");

    // Get references to the AudioUnit objects for the graph nodes.
    AUGraphNodeInfo(mGraph, filePlayerNode, NULL, &mFilePlayer);
    AUGraphNodeInfo(mGraph, filePlayerNode2, NULL, &mFilePlayer2);
    AUGraphNodeInfo(mGraph, reverbNode, NULL, &mReverb);
    AUGraphNodeInfo(mGraph, toneNode, NULL, &mTone);
    AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
    AUGraphNodeInfo(mGraph, gOutputNode, NULL, &mGIO);

    // Wire the players into the mixer and the mixer into the generic output.
    AUGraphConnectNodeInput(mGraph, filePlayerNode, 0, mixerNode, 0);
    AUGraphConnectNodeInput(mGraph, filePlayerNode2, 0, mixerNode, 1);
    AUGraphConnectNodeInput(mGraph, mixerNode, 0, gOutputNode, 0);

    // Set the mixer unit's input bus count.
    UInt32 busCount = 2;
    CheckError(AudioUnitSetProperty(mMixer, kAudioUnitProperty_ElementCount,
                                    kAudioUnitScope_Input, 0,
                                    &busCount, sizeof(busCount)),
               "Couldn't set mixer unit's bus count");

    // Enable metering mode to view the input and output levels of the mixer.
    UInt32 onValue = 1;
    CheckError(AudioUnitSetProperty(mMixer, kAudioUnitProperty_MeteringMode,
                                    kAudioUnitScope_Input, 0,
                                    &onValue, sizeof(onValue)),
               "Couldn't enable mixer metering");

    // Increasing the maximum frames per slice allows the mixer unit to
    // accommodate the larger slice size used when the screen is locked.
    UInt32 maximumFramesPerSlice = 4096;
    CheckError(AudioUnitSetProperty(mMixer, kAudioUnitProperty_MaximumFramesPerSlice,
                                    kAudioUnitScope_Global, 0,
                                    &maximumFramesPerSlice, sizeof(maximumFramesPerSlice)),
               "Couldn't set mixer unit's maximum frames per slice");

    // Set the audio data format of the tone unit.
    AudioUnitSetProperty(mTone, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Output, 0,
                         &stereoStreamFormat864, sizeof(AudioStreamBasicDescription));
    // Set the audio data format of the reverb unit.
    AudioUnitSetProperty(mReverb, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input, 0,
                         &stereoStreamFormat864, sizeof(AudioStreamBasicDescription));

    // Set the initial reverb decay times. Parameter IDs 4 and 5 are
    // kReverb2Param_DecayTimeAt0Hz and kReverb2Param_DecayTimeAtNyquist.
    AudioUnitParameterValue reverbTime = 2.5;
    AudioUnitSetParameter(mReverb, 4, kAudioUnitScope_Global, 0, reverbTime, 0);
    AudioUnitSetParameter(mReverb, 5, kAudioUnitScope_Global, 0, reverbTime, 0);

    // Get the audio data format from the reverb unit and reuse it,
    // at the graph sample rate, for the mixer output.
    AudioStreamBasicDescription auEffectStreamFormat;
    UInt32 asbdSize = sizeof(auEffectStreamFormat);
    memset(&auEffectStreamFormat, 0, sizeof(auEffectStreamFormat));
    CheckError(AudioUnitGetProperty(mReverb, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input, 0,
                                    &auEffectStreamFormat, &asbdSize),
               "Couldn't get aueffectunit ASBD");
    auEffectStreamFormat.mSampleRate = graphSampleRate;

    // Set the audio data format of the mixer unit's output.
    CheckError(AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output, 0,
                                    &auEffectStreamFormat, sizeof(auEffectStreamFormat)),
               "Couldn't set ASBD on mixer output");

    CheckError(AUGraphInitialize(mGraph), "Couldn't initialize the graph");

    [self setUpAUFilePlayer];
    [self setUpAUFilePlayer2];
}
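With the three connections above (arguments reconstructed from the destination-bus numbers that survive in the original), the pull chain ending at the file writer is simply players into mixer, mixer into generic output; the reverb and tone units are created and configured here but are not wired into this particular chain:

mFilePlayer  ──(bus 0)──┐
                        ├──> mMixer ──> mGIO (GenericOutput)
mFilePlayer2 ──(bus 1)──┘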
- (OSStatus)setUpAUFilePlayer
{
    NSString *songPath = [[NSBundle mainBundle] pathForResource:@"testVoice" ofType:@"m4a"];
    CFURLRef songURL = (CFURLRef)[NSURL fileURLWithPath:songPath];

    // Open the input audio file.
    CheckError(AudioFileOpenURL(songURL, kAudioFileReadPermission, 0, &inputFile),
               "setUpAUFilePlayer AudioFileOpenURL Failed");

    // Get the audio data format from the file.
    AudioStreamBasicDescription fileASBD;
    UInt32 propSize = sizeof(fileASBD);
    CheckError(AudioFileGetProperty(inputFile, kAudioFilePropertyDataFormat,
                                    &propSize, &fileASBD),
               "setUpAUFilePlayer couldn't get file's data format");

    // Tell the file player unit to load the file we want to play.
    CheckError(AudioUnitSetProperty(mFilePlayer, kAudioUnitProperty_ScheduledFileIDs,
                                    kAudioUnitScope_Global, 0,
                                    &inputFile, sizeof(inputFile)),
               "setUpAUFilePlayer AudioUnitSetProperty[kAudioUnitProperty_ScheduledFileIDs] Failed");

    UInt64 nPackets;
    UInt32 propsize = sizeof(nPackets);
    CheckError(AudioFileGetProperty(inputFile, kAudioFilePropertyAudioDataPacketCount,
                                    &propsize, &nPackets),
               "setUpAUFilePlayer AudioFileGetProperty[kAudioFilePropertyAudioDataPacketCount] Failed");

    // Tell the file player AU to play the entire file.
    ScheduledAudioFileRegion rgn;
    memset(&rgn.mTimeStamp, 0, sizeof(rgn.mTimeStamp));
    rgn.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
    rgn.mTimeStamp.mSampleTime = 0;
    rgn.mCompletionProc = NULL;
    rgn.mCompletionProcUserData = NULL;
    rgn.mAudioFile = inputFile;
    rgn.mLoopCount = -1;
    rgn.mStartFrame = 0;
    rgn.mFramesToPlay = nPackets * fileASBD.mFramesPerPacket;
    if (MaxSampleTime < rgn.mFramesToPlay) {
        MaxSampleTime = rgn.mFramesToPlay;
    }
    CheckError(AudioUnitSetProperty(mFilePlayer, kAudioUnitProperty_ScheduledFileRegion,
                                    kAudioUnitScope_Global, 0,
                                    &rgn, sizeof(rgn)),
               "setUpAUFilePlayer AudioUnitSetProperty[kAudioUnitProperty_ScheduledFileRegion] Failed");

    // Prime the file player AU with default values.
    UInt32 defaultVal = 0;
    CheckError(AudioUnitSetProperty(mFilePlayer, kAudioUnitProperty_ScheduledFilePrime,
                                    kAudioUnitScope_Global, 0,
                                    &defaultVal, sizeof(defaultVal)),
               "setUpAUFilePlayer AudioUnitSetProperty[kAudioUnitProperty_ScheduledFilePrime] Failed");

    // Tell the file player AU when to start playing
    // (-1 sample time means the next render cycle).
    AudioTimeStamp startTime;
    memset(&startTime, 0, sizeof(startTime));
    startTime.mFlags = kAudioTimeStampSampleTimeValid;
    startTime.mSampleTime = -1;
    CheckError(AudioUnitSetProperty(mFilePlayer, kAudioUnitProperty_ScheduleStartTimeStamp,
                                    kAudioUnitScope_Global, 0,
                                    &startTime, sizeof(startTime)),
               "setUpAUFilePlayer AudioUnitSetProperty[kAudioUnitProperty_ScheduleStartTimeStamp]");
    return noErr;
}
- (OSStatus)setUpAUFilePlayer2
{
    NSString *songPath = [[NSBundle mainBundle] pathForResource:@"BGmusic" ofType:@"mp3"];
    CFURLRef songURL = (CFURLRef)[NSURL fileURLWithPath:songPath];

    // Open the input audio file.
    CheckError(AudioFileOpenURL(songURL, kAudioFileReadPermission, 0, &inputFile2),
               "setUpAUFilePlayer2 AudioFileOpenURL Failed");

    // Get the audio data format from the file.
    AudioStreamBasicDescription fileASBD;
    UInt32 propSize = sizeof(fileASBD);
    CheckError(AudioFileGetProperty(inputFile2, kAudioFilePropertyDataFormat,
                                    &propSize, &fileASBD),
               "setUpAUFilePlayer2 couldn't get file's data format");

    // Tell the file player unit to load the file we want to play.
    CheckError(AudioUnitSetProperty(mFilePlayer2, kAudioUnitProperty_ScheduledFileIDs,
                                    kAudioUnitScope_Global, 0,
                                    &inputFile2, sizeof(inputFile2)),
               "setUpAUFilePlayer2 AudioUnitSetProperty[kAudioUnitProperty_ScheduledFileIDs] Failed");

    UInt64 nPackets;
    UInt32 propsize = sizeof(nPackets);
    CheckError(AudioFileGetProperty(inputFile2, kAudioFilePropertyAudioDataPacketCount,
                                    &propsize, &nPackets),
               "setUpAUFilePlayer2 AudioFileGetProperty[kAudioFilePropertyAudioDataPacketCount] Failed");

    // Tell the file player AU to play the entire file.
    ScheduledAudioFileRegion rgn;
    memset(&rgn.mTimeStamp, 0, sizeof(rgn.mTimeStamp));
    rgn.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
    rgn.mTimeStamp.mSampleTime = 0;
    rgn.mCompletionProc = NULL;
    rgn.mCompletionProcUserData = NULL;
    rgn.mAudioFile = inputFile2;
    rgn.mLoopCount = -1;
    rgn.mStartFrame = 0;
    rgn.mFramesToPlay = nPackets * fileASBD.mFramesPerPacket;
    if (MaxSampleTime < rgn.mFramesToPlay) {
        MaxSampleTime = rgn.mFramesToPlay;
    }
    CheckError(AudioUnitSetProperty(mFilePlayer2, kAudioUnitProperty_ScheduledFileRegion,
                                    kAudioUnitScope_Global, 0,
                                    &rgn, sizeof(rgn)),
               "setUpAUFilePlayer2 AudioUnitSetProperty[kAudioUnitProperty_ScheduledFileRegion] Failed");

    // Prime the file player AU with default values.
    UInt32 defaultVal = 0;
    CheckError(AudioUnitSetProperty(mFilePlayer2, kAudioUnitProperty_ScheduledFilePrime,
                                    kAudioUnitScope_Global, 0,
                                    &defaultVal, sizeof(defaultVal)),
               "setUpAUFilePlayer2 AudioUnitSetProperty[kAudioUnitProperty_ScheduledFilePrime] Failed");

    // Tell the file player AU when to start playing
    // (-1 sample time means the next render cycle).
    AudioTimeStamp startTime;
    memset(&startTime, 0, sizeof(startTime));
    startTime.mFlags = kAudioTimeStampSampleTimeValid;
    startTime.mSampleTime = -1;
    CheckError(AudioUnitSetProperty(mFilePlayer2, kAudioUnitProperty_ScheduleStartTimeStamp,
                                    kAudioUnitScope_Global, 0,
                                    &startTime, sizeof(startTime)),
               "setUpAUFilePlayer2 AudioUnitSetProperty[kAudioUnitProperty_ScheduleStartTimeStamp]");
    return noErr;
}
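After both players are scheduled, MaxSampleTime holds the frame count of the longer file, and that is what bounds the offline render loop further down. Frames convert to seconds as frames / sample rate; a quick sanity check (my own addition, not in the original):

// E.g. 441,000 frames at 44,100 Hz is a 10-second bounce.
Float64 durationSeconds = MaxSampleTime / graphSampleRate;
NSLog(@"Longest scheduled file: %.2f seconds", durationSeconds);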
// Start saving the file
- (void)startRecordingAAC
{
    AudioStreamBasicDescription destinationFormat;
    memset(&destinationFormat, 0, sizeof(destinationFormat));
    destinationFormat.mChannelsPerFrame = 2;
    destinationFormat.mFormatID = kAudioFormatMPEG4AAC;
    UInt32 size = sizeof(destinationFormat);
    // Let Core Audio fill out the remaining AAC format fields.
    OSStatus result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                             0, NULL, &size, &destinationFormat);
    if (result) printf("AudioFormatGetProperty %ld \n", (long)result);

    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
                                                         NSUserDomainMask, YES);
    NSString *documentsDirectory = [paths objectAtIndex:0];
    NSString *destinationFilePath = [[NSString alloc] initWithFormat:@"%@/output.m4a",
                                     documentsDirectory];
    CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
                                                            (CFStringRef)destinationFilePath,
                                                            kCFURLPOSIXPathStyle, false);
    [destinationFilePath release];

    // Save the output in .m4a format.
    result = ExtAudioFileCreateWithURL(destinationURL, kAudioFileM4AType,
                                       &destinationFormat, NULL,
                                       kAudioFileFlags_EraseFile, &extAudioFile);
    if (result) printf("ExtAudioFileCreateWithURL %ld \n", (long)result);
    CFRelease(destinationURL);

    // This is a very important part: the easiest way to get the correct ASBD
    // for the file's client format is to ask the GenericOutput unit for its format.
    AudioStreamBasicDescription clientFormat;
    UInt32 fSize = sizeof(clientFormat);
    memset(&clientFormat, 0, sizeof(clientFormat));
    CheckError(AudioUnitGetProperty(mGIO, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output, 0,
                                    &clientFormat, &fSize),
               "AudioUnitGetProperty on mGIO Failed");

    // Set the client data format on the ExtAudioFile.
    CheckError(ExtAudioFileSetProperty(extAudioFile,
                                       kExtAudioFileProperty_ClientDataFormat,
                                       sizeof(clientFormat), &clientFormat),
               "ExtAudioFileSetProperty kExtAudioFileProperty_ClientDataFormat Failed");

    // Specify the codec.
    UInt32 codec = kAppleHardwareAudioCodecManufacturer;
    CheckError(ExtAudioFileSetProperty(extAudioFile,
                                       kExtAudioFileProperty_CodecManufacturer,
                                       sizeof(codec), &codec),
               "ExtAudioFileSetProperty on extAudioFile Failed");

    // An initial async write with 0 frames primes the writer's internal state.
    CheckError(ExtAudioFileWriteAsync(extAudioFile, 0, NULL),
               "ExtAudioFileWriteAsync Failed");

    [self pullGenericOutput];
}
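One caveat: the hardware codec can be unavailable (for example, when another client holds it), and historically it could not always be driven faster than real time. If setting the codec or the render loop fails, switching to the software encoder is a common workaround (a sketch, not part of the original answer):

// Fall back to the software AAC encoder, which is safe to run
// faster than real time.
UInt32 codec = kAppleSoftwareAudioCodecManufacturer;
CheckError(ExtAudioFileSetProperty(extAudioFile,
                                   kExtAudioFileProperty_CodecManufacturer,
                                   sizeof(codec), &codec),
           "Couldn't set the software codec");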
// Manually render and pull the data/buffers from the GenericOutput node.
- (void)pullGenericOutput
{
    AudioUnitRenderActionFlags flags = 0;
    AudioTimeStamp inTimeStamp;
    memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
    inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
    UInt32 busNumber = 0;
    UInt32 numberFrames = 512;
    inTimeStamp.mSampleTime = 0;
    int channelCount = 2;
    NSLog(@"Final numberFrames: %u", (unsigned int)numberFrames);

    int totFrms = (int)MaxSampleTime;
    while (totFrms > 0) {
        if (totFrms < numberFrames) {
            numberFrames = totFrms;
            NSLog(@"Final numberFrames: %u", (unsigned int)numberFrames);
        }
        totFrms -= numberFrames; // decrement in both cases so the tail isn't rendered twice

        // Build a noninterleaved AudioBufferList: one mono buffer per channel.
        AudioBufferList *bufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList)
                                          + sizeof(AudioBuffer) * (channelCount - 1));
        bufferList->mNumberBuffers = channelCount;
        for (int j = 0; j < channelCount; j++) {
            AudioBuffer buffer = {0};
            buffer.mNumberChannels = 1;
            buffer.mDataByteSize = numberFrames * sizeof(AudioUnitSampleType);
            buffer.mData = calloc(numberFrames, sizeof(AudioUnitSampleType));
            bufferList->mBuffers[j] = buffer;
        }

        // Pull one slice from the GenericOutput unit and write it to the file.
        CheckError(AudioUnitRender(mGIO, &flags, &inTimeStamp, busNumber,
                                   numberFrames, bufferList),
                   "AudioUnitRender mGIO");
        CheckError(ExtAudioFileWrite(extAudioFile, numberFrames, bufferList),
                   "extaudiofilewrite fail");

        // Advance the render timestamp and free this slice's buffers.
        inTimeStamp.mSampleTime += numberFrames;
        for (int j = 0; j < channelCount; j++) {
            free(bufferList->mBuffers[j].mData);
        }
        free(bufferList);
    }
    [self FilesSavingCompleted];
}
// FilesSavingCompleted
- (void)FilesSavingCompleted
{
    OSStatus status = ExtAudioFileDispose(extAudioFile);
    printf("OSStatus(ExtAudioFileDispose): %ld\n", (long)status);
}
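For completeness, a hypothetical call site (the class name AudioMixer is a stand-in; the original answer doesn't show one):

AudioMixer *mixer = [[AudioMixer alloc] init]; // builds and initializes the AUGraph in init
[mixer startRecordingAAC];                     // renders offline, writes Documents/output.m4a

FilesSavingCompleted only disposes of the ExtAudioFileRef. If you are finished with the graph at that point, you could additionally close the input files and tear the graph down there (an assumption, not in the original):

// Optional extra teardown once saving has completed.
AudioFileClose(inputFile);
AudioFileClose(inputFile2);
CheckError(AUGraphUninitialize(mGraph), "AUGraphUninitialize failed");
CheckError(AUGraphClose(mGraph), "AUGraphClose failed");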