I want to build a synthesizer for the iPhone. I'm aware that it's possible to use custom audio units on the iPhone. At first glance this sounds promising, since there are plenty of Audio Unit programming resources available. However, using custom audio units on the iPhone seems a bit tricky (see: http://lists.apple.com/archives/Coreaudio-api/2008/Nov/msg00262.html)
This seems like the sort of thing that loads of people must be doing, but a simple Google search for "iphone audio synthesis" doesn't turn up anything along the lines of a nice, easy tutorial or a recommended toolkit.
So, does anyone here have experience synthesizing sound on the iPhone? Are custom audio units the way to go, or is there another, simpler approach I should consider?
And*_*nan 21
I'm looking into this too. I think the AudioQueue API is probably the way to go.
Here's as far as I've got; for my purposes it seems to work fine.
File: BleepMachine.h
//
// BleepMachine.h
// WgHeroPrototype
//
// Created by Andy Buchanan on 05/01/2010.
// Copyright 2010 Andy Buchanan. All rights reserved.
//
#include <AudioToolbox/AudioToolbox.h>
// Class to implement sound playback using the AudioQueue APIs.
// Currently just supports playing two sine wave tones, one per
// stereo channel. The sound data is little-endian signed 16-bit @ 44.1kHz
//
class BleepMachine
{
    static void staticQueueCallback( void* userData, AudioQueueRef outAQ, AudioQueueBufferRef outBuffer )
    {
        BleepMachine* pThis = reinterpret_cast<BleepMachine*> ( userData );
        pThis->queueCallback( outAQ, outBuffer );
    }
    void queueCallback( AudioQueueRef outAQ, AudioQueueBufferRef outBuffer );

    AudioStreamBasicDescription m_outFormat;
    AudioQueueRef m_outAQ;

    enum
    {
        kBufferSizeInFrames = 512,
        kNumBuffers = 4,
        kSampleRate = 44100,
    };

    AudioQueueBufferRef m_buffers[kNumBuffers];
    bool m_isInitialised;

    struct Wave
    {
        Wave(): volume(1.f), phase(0.f), frequency(0.f), fStep(0.f) {}

        float volume;
        float phase;
        float frequency;
        float fStep;
    };

    enum
    {
        kLeftWave = 0,
        kRightWave = 1,
        kNumWaves,
    };

    Wave m_waves[kNumWaves];

public:
    BleepMachine();
    ~BleepMachine();

    bool Initialise();
    void Shutdown();

    bool Start();
    bool Stop();

    bool SetWave( int id, float frequency, float volume );
};
// Notes by name. Integer value is number of semitones above A.
enum Note
{
    A = 0,
    Asharp,
    B,
    C,
    Csharp,
    D,
    Dsharp,
    E,
    F,
    Fsharp,
    G,
    Gsharp,

    Bflat = Asharp,
    Dflat = Csharp,
    Eflat = Dsharp,
    Gflat = Fsharp,
    Aflat = Gsharp,
};
// Helper function calculates fundamental frequency for a given note
float CalculateFrequencyFromNote( SInt32 semiTones, SInt32 octave=4 );
float CalculateFrequencyFromMIDINote( SInt32 midiNoteNumber );
File: BleepMachine.mm
//
// BleepMachine.mm
// WgHeroPrototype
//
// Created by Andy Buchanan on 05/01/2010.
// Copyright 2010 Andy Buchanan. All rights reserved.
//
#include "BleepMachine.h"
void BleepMachine::queueCallback( AudioQueueRef outAQ, AudioQueueBufferRef outBuffer )
{
    // Render the wave

    // AudioQueueBufferRef is considered "opaque", but it's a reference to
    // an AudioQueueBuffer, which is not. All the sample code pokes at the
    // buffer directly, so it's not clear what "opaque" is supposed to mean here.
    SInt16* coreAudioBuffer = (SInt16*)outBuffer->mAudioData;

    // Specify how many bytes we're providing
    outBuffer->mAudioDataByteSize = kBufferSizeInFrames * m_outFormat.mBytesPerFrame;

    // Generate the sine waves as signed 16-bit stereo interleaved ( little endian )
    float volumeL = m_waves[kLeftWave].volume;
    float volumeR = m_waves[kRightWave].volume;
    float phaseL = m_waves[kLeftWave].phase;
    float phaseR = m_waves[kRightWave].phase;
    float fStepL = m_waves[kLeftWave].fStep;
    float fStepR = m_waves[kRightWave].fStep;

    for( int s=0; s<kBufferSizeInFrames*2; s+=2 )
    {
        float sampleL = ( volumeL * sinf( phaseL ) );
        float sampleR = ( volumeR * sinf( phaseR ) );

        short sampleIL = (int)(sampleL * 32767.0);
        short sampleIR = (int)(sampleR * 32767.0);

        coreAudioBuffer[s] = sampleIL;
        coreAudioBuffer[s+1] = sampleIR;

        phaseL += fStepL;
        phaseR += fStepR;
    }

    m_waves[kLeftWave].phase = fmodf( phaseL, 2 * M_PI );   // Take modulus to preserve precision
    m_waves[kRightWave].phase = fmodf( phaseR, 2 * M_PI );

    // Enqueue the buffer
    AudioQueueEnqueueBuffer( m_outAQ, outBuffer, 0, NULL );
}
bool BleepMachine::SetWave( int id, float frequency, float volume )
{
    if ( ( id < kLeftWave ) || ( id >= kNumWaves ) ) return false;

    Wave& wave = m_waves[ id ];

    wave.volume = volume;
    wave.frequency = frequency;
    wave.fStep = 2 * M_PI * frequency / kSampleRate;

    return true;
}
bool BleepMachine::Initialise()
{
    m_outFormat.mSampleRate = kSampleRate;
    m_outFormat.mFormatID = kAudioFormatLinearPCM;
    m_outFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    m_outFormat.mFramesPerPacket = 1;
    m_outFormat.mChannelsPerFrame = 2;
    m_outFormat.mBytesPerPacket = m_outFormat.mBytesPerFrame = sizeof(UInt16) * 2;
    m_outFormat.mBitsPerChannel = 16;
    m_outFormat.mReserved = 0;
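    // Create the output queue. Passing NULL for the callback run loop and
    // run-loop mode asks the queue to invoke staticQueueCallback on one of
    // its own internal threads.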
    OSStatus result = AudioQueueNewOutput(
        &m_outFormat,
        BleepMachine::staticQueueCallback,
        this,
        NULL,
        NULL,
        0,
        &m_outAQ
    );

    if ( result < 0 )
    {
        printf( "ERROR: %d\n", (int)result );
        return false;
    }

    // Allocate buffers for the audio
    UInt32 bufferSizeBytes = kBufferSizeInFrames * m_outFormat.mBytesPerFrame;

    for ( int buf=0; buf<kNumBuffers; buf++ )
    {
        OSStatus result = AudioQueueAllocateBuffer( m_outAQ, bufferSizeBytes, &m_buffers[ buf ] );
        if ( result )
        {
            printf( "ERROR: %d\n", (int)result );
            return false;
        }

        // Prime the buffers
        queueCallback( m_outAQ, m_buffers[ buf ] );
    }

    m_isInitialised = true;
    return true;
}
void BleepMachine::Shutdown()
{
    Stop();

    if ( m_outAQ )
    {
        // AudioQueueDispose also chucks any audio buffers it has
        AudioQueueDispose( m_outAQ, true );
    }

    m_isInitialised = false;
}

BleepMachine::BleepMachine()
: m_isInitialised(false), m_outAQ(0)
{
    for ( int buf=0; buf<kNumBuffers; buf++ )
    {
        m_buffers[ buf ] = NULL;
    }
}

BleepMachine::~BleepMachine()
{
    Shutdown();
}
bool BleepMachine::Start()
{
    OSStatus result = AudioQueueSetParameter( m_outAQ, kAudioQueueParam_Volume, 1.0 );
    if ( result ) printf( "ERROR: %d\n", (int)result );

    // Start the queue
    result = AudioQueueStart( m_outAQ, NULL );
    if ( result ) printf( "ERROR: %d\n", (int)result );

    return true;
}

bool BleepMachine::Stop()
{
    OSStatus result = AudioQueueStop( m_outAQ, true );
    if ( result ) printf( "ERROR: %d\n", (int)result );

    return true;
}
// A      (A4 = 440)
// A#     f(n) = 2^(n/12) * r
// B      where n = number of semitones
// C      and r is the root frequency, e.g. 440
// C#
// D      frequency -> MIDI note number:
// D#     p = 69 + 12 x log2(f/440)
// E
// F
// F#
// G
// G#
//
// MIDI Note ref: http://www.phys.unsw.edu.au/jw/notes.html
//
// MIDI Note numbers:
// A3   57
// A#3  58
// B3   59
// C4   60  <--
// C#4  61
// D4   62
// D#4  63
// E4   64
// F4   65
// F#4  66
// G4   67
// G#4  68
// A4   69  <--
// A#4  70
// B4   71
// C5   72
float CalculateFrequencyFromNote( SInt32 semiTones, SInt32 octave )
{
    semiTones += ( 12 * (octave-4) );

    float root = 440.f;
    float fn = powf( 2.f, (float)semiTones/12.f ) * root;
    return fn;
}

float CalculateFrequencyFromMIDINote( SInt32 midiNoteNumber )
{
    SInt32 semiTones = midiNoteNumber - 69;
    return CalculateFrequencyFromNote( semiTones, 4 );
}
//for ( SInt32 midiNote=21; midiNote<=108; ++midiNote )
//{
// printf( "MIDI Note %d: %f Hz \n",(int)midiNote,CalculateFrequencyFromMIDINote( midiNote ) );
//}
Update: basic usage information
Initialisation. Somewhere near the start; in my code I do this in initFromNib:
m_bleepMachine = new BleepMachine;
m_bleepMachine->Initialise();
m_bleepMachine->Start();
At this point sound playback is running, but it's generating silence.
In your own code, call this method whenever you want to change the tone being generated:
m_bleepMachine->SetWave( ch, frq, vol );
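For example, combining it with the frequency helper above (channel index and volume here are just arbitrary illustration values):
m_bleepMachine->SetWave( 0, CalculateFrequencyFromNote( E ), 0.5f );   // left channel, note E, half volume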
And when the program terminates:
delete m_bleepMachine;
mor*_*des 16
I've come a long way since my original post almost a year ago. After a pretty exhaustive search, I turned up very few high-level synthesis toolkits suitable for iOS development. Many are GPL licensed, and the GPL is too restrictive for me to feel comfortable using it. LibPD works well and is what rjdj uses, but I found myself really frustrated by the graphical programming paradigm. JSyn's C-based engine, csyn, is an option, but it requires licensing and I'm used to programming with open source tools. It does look worth a close look, though.
In the end I settled on STK as my basic framework. STK is a very low-level tool and requires a good deal of buffer-level programming to get working. This is in contrast to something higher-level like PD or SuperCollider, which let you simply plug unit generators together without worrying about handling the raw audio data.
Working this way with STK is certainly a bit slower than with a higher-level tool, but I'm comfortable with it, especially now that I'm becoming more comfortable with C/C++ programming in general. A rough sketch of what that buffer-level style looks like follows below.
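To give a flavour of it, here is a minimal sketch (not code from my project) of pulling samples from an STK unit generator into an interleaved stereo buffer. It assumes a recent STK where classes live in the stk namespace; the surrounding audio I/O (e.g. a RemoteIO or AudioQueue callback) is up to you:

#include "SineWave.h"   // STK sine-wave unit generator
#include "Stk.h"

// Fill an interleaved stereo float buffer, one sample at a time, from an STK generator.
void fillBuffer( stk::SineWave& osc, float* buffer, unsigned int numFrames )
{
    for ( unsigned int i = 0; i < numFrames; ++i )
    {
        float sample = (float)osc.tick();   // one mono sample from the generator
        buffer[ i * 2 ]     = sample;       // left
        buffer[ i * 2 + 1 ] = sample;       // right
    }
}

// Usage sketch:
//   stk::Stk::setSampleRate( 44100.0 );
//   stk::SineWave osc;
//   osc.setFrequency( 440.0 );
//   fillBuffer( osc, myOutputBuffer, 512 );   // myOutputBuffer is whatever your audio callback hands you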
There's a new project under way to create a patching-style add-on for Open Frameworks, out of the University of Vancouver; I think it's called Cleo. It hasn't been released yet, but it looks like a very nice mix: patch-style connection of unit generators in C++ rather than requiring another language, and tight integration with Open Frameworks, which may or may not be appealing depending on your needs.
So, to answer my original question: first you need to learn how to write to the output buffer. Here's some good sample code for that:
http://atastypixel.com/blog/using-remoteio-audio-unit/
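Just to illustrate what "writing to the output buffer" means, here's a minimal sketch of the kind of render callback that tutorial has you register on the RemoteIO audio unit. This isn't the tutorial's code; it assumes the unit was configured for interleaved signed 16-bit stereo at 44.1kHz, and SineRenderCallback is a name I made up:

#include <AudioUnit/AudioUnit.h>
#include <math.h>

// Standard AURenderCallback: fill ioData with a 440 Hz sine wave each time
// the RemoteIO unit asks for more audio.
static OSStatus SineRenderCallback( void *inRefCon,
                                    AudioUnitRenderActionFlags *ioActionFlags,
                                    const AudioTimeStamp *inTimeStamp,
                                    UInt32 inBusNumber,
                                    UInt32 inNumberFrames,
                                    AudioBufferList *ioData )
{
    static float phase = 0.f;
    const float phaseStep = 2.f * M_PI * 440.f / 44100.f;

    SInt16 *out = (SInt16 *)ioData->mBuffers[0].mData;
    for ( UInt32 frame = 0; frame < inNumberFrames; ++frame )
    {
        SInt16 sample = (SInt16)( sinf( phase ) * 32767.f * 0.5f );
        out[ frame * 2 ]     = sample;   // left
        out[ frame * 2 + 1 ] = sample;   // right
        phase = fmodf( phase + phaseStep, 2.f * M_PI );
    }
    return noErr;
}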
Then you need to do some synthesis to generate the audio data. If you like patching, I wouldn't hesitate to recommend libpd. It seems to work great, and it lets you work the way you're used to. If you hate graphical patching (like me), your best starting place for now is probably STK. If STK and low-level audio programming seem a bit over your head (as they did for me), just roll up your sleeves, pack a tent, and settle in for a long hike up the learning curve. You'll be a much better programmer at the end of it.
One piece of advice I wish I could have given myself a year ago: join Apple's Core Audio mailing list.
============== 2014 Edit ===========
I'm now using (and actively contributing to) the Tonic audio synthesis library. It's awesome, if I do say so myself.
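For reference, a minimal Tonic sketch, assuming the Synth/Generator API from Tonic's README (the names SineWave, freq, and setOutputGen are Tonic's; everything else here is just illustration, and the synth's output still has to be pulled from an audio callback such as RemoteIO):

#include "Tonic.h"

using namespace Tonic;

Tonic::Synth synth;

void setupSynth()
{
    // A 440 Hz sine, attenuated, as the synth's output generator.
    Generator tone = SineWave().freq( 440 ) * 0.5;
    synth.setOutputGen( tone );
}

// In your audio render callback you would then ask the synth to fill the
// output buffer, e.g. synth.fillBufferOfFloats( buffer, numFrames, numChannels ).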