Add Dsp::GetSamples and cleanup Apu

scawful
2024-04-20 08:09:43 -04:00
parent a2bda63ae5
commit e89d768b5a
4 changed files with 22 additions and 595 deletions

View File

@@ -20,14 +20,6 @@ namespace audio {
void Apu::Init() {
// Set the clock frequency
clock_.SetFrequency(kApuClockSpeed);
// Initialize Digital Signal Processor Callbacks
dsp_.SetSampleFetcher([this](uint16_t address) -> uint8_t {
return this->FetchSampleFromRam(address);
});
dsp_.SetSamplePusher(
[this](int16_t sample) { this->PushToAudioBuffer(sample); });
}
void Apu::Reset() {
@@ -40,16 +32,11 @@ void Apu::Update() {
auto cycles_to_run = clock_.GetCycleCount();
for (auto i = 0; i < cycles_to_run; ++i) {
// Update the Apu
UpdateChannelSettings();
// Update the SPC700
uint8_t opcode = spc700_.read(spc700_.PC);
spc700_.ExecuteInstructions(opcode);
spc700_.PC++;
}
ProcessSamples();
}
void Apu::Notify(uint32_t address, uint16_t data) {
@@ -60,7 +47,6 @@ void Apu::Notify(uint32_t address, uint16_t data) {
spc700_.write(offset, data);
// HACK - This is a temporary solution to get the Apu to play audio
SDL_Log("Apu::Notify(0x%08x, 0x%04x)", address, data);
ports_[address - 0x2140] = data;
switch (address) {
case 0x2140:
@@ -78,64 +64,6 @@ void Apu::Notify(uint32_t address, uint16_t data) {
}
}
void Apu::ProcessSamples() {
// Fetch sample data from AudioRam
// Iterate over all voices
for (uint8_t voice_num = 0; voice_num < 8; voice_num++) {
// Fetch the sample data for the current voice from AudioRam
uint8_t sample = FetchSampleForVoice(voice_num);
// Process the sample through DSP
int16_t processed_sample = dsp_.ProcessSample(voice_num, sample);
// Add the processed sample to the audio buffer
audio_samples_.push_back(processed_sample);
}
}
uint8_t Apu::FetchSampleForVoice(uint8_t voice_num) {
uint16_t address = CalculateAddressForVoice(voice_num);
return aram_.read(address);
}
uint16_t Apu::CalculateAddressForVoice(uint8_t voice_num) {
// TODO: Calculate the address for the specified voice
return voice_num;
}
int16_t Apu::GetNextSample() {
if (!audio_samples_.empty()) {
int16_t sample = audio_samples_.front();
audio_samples_.erase(audio_samples_.begin());
return sample;
}
return 0; // TODO: Return the last sample instead of 0.
}
const std::vector<int16_t>& Apu::GetAudioSamples() const {
return audio_samples_;
}
void Apu::UpdateChannelSettings() {
// TODO: Implement this method to update the channel settings.
}
int16_t Apu::GenerateSample(int channel) {
// TODO: Implement this method to generate a sample for the specified channel.
}
void Apu::ApplyEnvelope(int channel) {
// TODO: Implement this method to apply an envelope to the specified channel.
}
uint8_t Apu::ReadDspMemory(uint16_t address) {
return dsp_.ReadGlobalReg(address);
}
void Apu::WriteDspMemory(uint16_t address, uint8_t value) {
dsp_.WriteGlobalReg(address, value);
}
} // namespace audio
} // namespace emu
} // namespace app
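With ProcessSamples, GetNextSample, and the fetch/push callbacks removed from Apu, audio now leaves the emulator in whole-frame blocks pulled through the dsp() accessor. A minimal sketch of a host-side frame step under that model follows; the include path, host sample rate, and surrounding run loop are assumptions, not part of this commit.

// Hypothetical per-frame audio step (not part of this commit).
#include <cstdint>
#include <vector>

#include "apu.h"  // include path assumed; provides app::emu::audio::Apu

constexpr int kHostSampleRate = 48000;  // assumed host audio rate
constexpr int kNtscFramesPerSecond = 60;
constexpr int kSamplesPerFrame = kHostSampleRate / kNtscFramesPerSecond;  // 800

void StepAudioFrame(app::emu::audio::Apu& apu, std::vector<int16_t>& out) {
  out.resize(kSamplesPerFrame * 2);  // interleaved stereo
  apu.Update();  // run the SPC700 for this frame's worth of clock cycles
  apu.dsp().GetSamples(out.data(), kSamplesPerFrame, /*pal_timing=*/false);
}

The resulting interleaved stereo buffer can then be handed to whatever backend the frontend uses, for example SDL_QueueAudio in SDL2.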

View File

@@ -17,12 +17,6 @@ namespace audio {
using namespace memory;
/**
*
*
*/
const int kApuClockSpeed = 1024000; // 1.024 MHz
const int apuSampleRate = 32000; // 32 KHz
const int apuClocksPerSample = 64; // 64 clocks per sample
@@ -62,16 +56,10 @@ class Apu : public Observer {
void Update();
void Notify(uint32_t address, uint16_t data) override;
void ProcessSamples();
uint8_t FetchSampleForVoice(uint8_t voice_num);
uint16_t CalculateAddressForVoice(uint8_t voice_num);
int16_t GetNextSample();
// Called upon a reset
void Initialize() {
spc700_.Reset();
dsp_.Reset();
SignalReady();
}
// Set Port 0 = $AA and Port 1 = $BB
@@ -100,16 +88,7 @@ class Apu : public Observer {
void UpdateClock(int delta_time) { clock_.UpdateClock(delta_time); }
// Method to fetch a sample from AudioRam
uint8_t FetchSampleFromRam(uint16_t address) const {
return aram_.read(address);
}
// Method to push a processed sample to the audio buffer
void PushToAudioBuffer(int16_t sample) { audio_samples_.push_back(sample); }
// Returns the audio samples for the current frame
const std::vector<int16_t> &GetAudioSamples() const;
auto dsp() -> Dsp & { return dsp_; }
private:
// Constants for communication
@@ -120,29 +99,13 @@ class Apu : public Observer {
// Port buffers (equivalent to $2140 to $2143 for the main CPU)
uint8_t ports_[4] = {0};
// Updates internal state based on APU register settings
void UpdateChannelSettings();
// Generates a sample for an audio channel
int16_t GenerateSample(int channel);
// Applies an envelope to an audio channel
void ApplyEnvelope(int channel);
// Handles DSP (Digital Signal Processor) memory reads and writes
uint8_t ReadDspMemory(uint16_t address);
void WriteDspMemory(uint16_t address, uint8_t value);
// Member variables to store internal APU state and resources
AudioRam &aram_;
Clock &clock_;
MemoryImpl &memory_;
DigitalSignalProcessor dsp_;
Dsp dsp_;
Spc700 spc700_{aram_};
std::vector<int16_t> audio_samples_;
std::function<void()> ready_callback_;
};
} // namespace audio
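The "Set Port 0 = $AA and Port 1 = $BB" comment mirrors the SPC700 IPL ROM's ready signature: after reset the boot ROM writes $AA to port 0 and $BB to port 1, and the main CPU polls $2140/$2141 for those values before starting an upload. A short sketch of that S-CPU-side poll, with read8 as a hypothetical stand-in for whatever bus-read interface the emulator exposes.

#include <cstdint>
#include <functional>

// Wait for the SPC700 IPL ROM ready signature on the APU I/O ports.
// `read8` is a hypothetical bus-read hook, not an interface from this commit.
bool WaitForApuReady(const std::function<uint8_t(uint32_t)>& read8,
                     int max_polls = 100000) {
  for (int i = 0; i < max_polls; ++i) {
    if (read8(0x2140) == 0xAA && read8(0x2141) == 0xBB) {
      return true;  // boot ROM is idle and waiting for a transfer command
    }
  }
  return false;  // APU never signalled ready
}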

View File

@@ -7,282 +7,21 @@ namespace app {
namespace emu {
namespace audio {
void DigitalSignalProcessor::Reset() {}
void Dsp::Reset() {}
uint8_t DigitalSignalProcessor::ReadVoiceReg(uint8_t voice, uint8_t reg) const {
voice %= kNumVoices;
switch (reg % kNumVoiceRegs) {
case 0:
return voices_[voice].vol_left;
case 1:
return voices_[voice].vol_right;
case 2:
return voices_[voice].pitch_low;
case 3:
return voices_[voice].pitch_high;
case 4:
return voices_[voice].source_number;
case 5:
return voices_[voice].adsr1;
case 6:
return voices_[voice].adsr2;
case 7:
return voices_[voice].gain;
case 8:
return voices_[voice].envx;
case 9:
return voices_[voice].outx;
default:
return 0; // This shouldn't happen, but it's good to have a default case
void Dsp::GetSamples(int16_t* sample_data, int samples_per_frame,
bool pal_timing) {
// resample from 534 / 641 samples per frame to wanted value
float wantedSamples = (pal_timing ? 641.0 : 534.0);
double adder = wantedSamples / samples_per_frame;
double location = sample_offset_ - wantedSamples;
for (int i = 0; i < samples_per_frame; i++) {
sample_data[i * 2] = sample_buffer_[(((int)location) & 0x3ff) * 2];
sample_data[i * 2 + 1] = sample_buffer_[(((int)location) & 0x3ff) * 2 + 1];
location += adder;
}
}
void DigitalSignalProcessor::WriteVoiceReg(uint8_t voice, uint8_t reg,
uint8_t value) {
voice %= kNumVoices;
switch (reg % kNumVoiceRegs) {
case 0:
voices_[voice].vol_left = static_cast<int8_t>(value);
break;
case 1:
voices_[voice].vol_right = static_cast<int8_t>(value);
break;
case 2:
voices_[voice].pitch_low = value;
break;
case 3:
voices_[voice].pitch_high = value;
break;
case 4:
voices_[voice].source_number = value;
break;
case 5:
voices_[voice].adsr1 = value;
break;
case 6:
voices_[voice].adsr2 = value;
break;
case 7:
voices_[voice].gain = value;
break;
// Note: envx and outx are read-only, so they don't have cases here
}
}
// Set the callbacks
void DigitalSignalProcessor::SetSampleFetcher(SampleFetcher fetcher) {
sample_fetcher_ = fetcher;
}
void DigitalSignalProcessor::SetSamplePusher(SamplePusher pusher) {
sample_pusher_ = pusher;
}
int16_t DigitalSignalProcessor::DecodeSample(uint8_t voice_num) {
Voice const& voice = voices_[voice_num];
uint16_t sample_address = voice.source_number;
// Use the callback to fetch the sample
int16_t sample = static_cast<int16_t>(sample_fetcher_(sample_address) << 8);
return sample;
}
int16_t DigitalSignalProcessor::ProcessSample(uint8_t voice_num,
int16_t sample) {
Voice const& voice = voices_[voice_num];
// Adjust the pitch (for simplicity, we're just adjusting the sample value)
sample += voice.pitch_low + (voice.pitch_high << 8);
// Apply volume (separate for left and right for stereo sound)
int16_t left_sample = (sample * voice.vol_left) / 255;
int16_t right_sample = (sample * voice.vol_right) / 255;
// Combine stereo samples into a single 16-bit value
return (left_sample + right_sample) / 2;
}
void DigitalSignalProcessor::MixSamples() {
int16_t mixed_sample = 0;
for (uint8_t i = 0; i < kNumVoices; i++) {
int16_t decoded_sample = DecodeSample(i);
int16_t processed_sample = ProcessSample(i, decoded_sample);
mixed_sample += processed_sample;
}
// Clamp the mixed sample to 16-bit range
if (mixed_sample > 32767) {
mixed_sample = 32767;
} else if (mixed_sample < -32768) {
mixed_sample = -32768;
}
// Use the callback to push the mixed sample
sample_pusher_(mixed_sample);
}
void DigitalSignalProcessor::UpdateEnvelope(uint8_t voice) {
uint8_t adsr1 = ReadVoiceReg(voice, 0x05);
uint8_t adsr2 = ReadVoiceReg(voice, 0x06);
uint8_t gain = ReadVoiceReg(voice, 0x07);
uint8_t enableADSR = (adsr1 & 0x80) >> 7;
if (enableADSR) {
// Handle ADSR envelope
Voice& voice_obj = voices_[voice];
switch (voice_obj.state) {
case VoiceState::ATTACK:
// Update amplitude based on attack rate
voice_obj.current_amplitude += AttackRate(adsr1);
if (voice_obj.current_amplitude >= ENVELOPE_MAX) {
voice_obj.current_amplitude = ENVELOPE_MAX;
voice_obj.state = VoiceState::DECAY;
}
break;
case VoiceState::DECAY:
// Update amplitude based on decay rate
voice_obj.current_amplitude -= DecayRate(adsr2);
if (voice_obj.current_amplitude <= voice_obj.decay_level) {
voice_obj.current_amplitude = voice_obj.decay_level;
voice_obj.state = VoiceState::SUSTAIN;
}
break;
case VoiceState::SUSTAIN:
// Keep amplitude at the calculated decay level
voice_obj.current_amplitude = voice_obj.decay_level;
break;
case VoiceState::RELEASE:
// Update amplitude based on release rate
voice_obj.current_amplitude -= ReleaseRate(adsr2);
if (voice_obj.current_amplitude <= 0) {
voice_obj.current_amplitude = 0;
voice_obj.state = VoiceState::OFF;
}
break;
default:
break;
}
} else {
// Handle Gain envelope
// Extract mode from the gain byte
uint8_t mode = (gain & 0xE0) >> 5;
uint8_t rate = gain & 0x1F;
Voice& voice_obj = voices_[voice];
switch (mode) {
case 0: // Direct Designation
case 1:
case 2:
case 3:
voice_obj.current_amplitude =
rate << 3; // Multiplying by 8 to scale to 0-255
break;
case 6: // Increase Mode (Linear)
voice_obj.current_amplitude += gainTimings[0][rate];
if (voice_obj.current_amplitude > ENVELOPE_MAX) {
voice_obj.current_amplitude = ENVELOPE_MAX;
}
break;
case 7: // Increase Mode (Bent Line)
// Hypothetical behavior: Increase linearly at first, then increase
// more slowly. You'll likely need to adjust this based on your
// specific requirements
if (voice_obj.current_amplitude < (ENVELOPE_MAX / 2)) {
voice_obj.current_amplitude += gainTimings[1][rate];
} else {
voice_obj.current_amplitude += gainTimings[1][rate] / 2;
}
if (voice_obj.current_amplitude > ENVELOPE_MAX) {
voice_obj.current_amplitude = ENVELOPE_MAX;
}
break;
case 4: // Decrease Mode (Linear)
if (voice_obj.current_amplitude < gainTimings[2][rate]) {
voice_obj.current_amplitude = 0;
} else {
voice_obj.current_amplitude -= gainTimings[2][rate];
}
break;
case 5: // Decrease Mode (Exponential)
voice_obj.current_amplitude -=
(voice_obj.current_amplitude * gainTimings[3][rate]) / ENVELOPE_MAX;
break;
default:
// Default behavior can be handled here if necessary
break;
}
}
}
void DigitalSignalProcessor::update_voice_state(uint8_t voice_num) {
if (voice_num >= kNumVoices) return;
Voice& voice = voices_[voice_num];
switch (voice.state) {
case VoiceState::OFF:
// Reset current amplitude
voice.current_amplitude = 0;
break;
case VoiceState::ATTACK:
// Increase the current amplitude at a rate defined by the ATTACK
// setting
voice.current_amplitude += AttackRate(voice.adsr1);
if (voice.current_amplitude >= ENVELOPE_MAX) {
voice.current_amplitude = ENVELOPE_MAX;
voice.state = VoiceState::DECAY;
voice.decay_level = CalculateDecayLevel(voice.adsr2);
}
break;
case VoiceState::DECAY:
// Decrease the current amplitude at a rate defined by the DECAY setting
voice.current_amplitude -= DecayRate(voice.adsr2);
if (voice.current_amplitude <= voice.decay_level) {
voice.current_amplitude = voice.decay_level;
voice.state = VoiceState::SUSTAIN;
}
break;
case VoiceState::SUSTAIN:
// Keep the current amplitude at the decay level
break;
case VoiceState::RELEASE:
// Decrease the current amplitude at a rate defined by the RELEASE
// setting
voice.current_amplitude -= ReleaseRate(voice.adsr2);
if (voice.current_amplitude == 0) {
voice.state = VoiceState::OFF;
}
break;
}
}
void DigitalSignalProcessor::process_envelope(uint8_t voice_num) {
if (voice_num >= kNumVoices) return;
Voice& voice = voices_[voice_num];
// Update the voice state first (based on keys, etc.)
update_voice_state(voice_num);
// Calculate the envelope value based on the current amplitude
voice.envx = calculate_envelope_value(voice.current_amplitude);
// Apply the envelope value to the audio output
apply_envelope_to_output(voice_num);
}
} // namespace audio
} // namespace emu
} // namespace app
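GetSamples does nearest-neighbor resampling out of the 1024-frame ring buffer: the DSP produces roughly 32000 Hz / 60 fps ≈ 533 stereo frames per NTSC video frame (about 640 per PAL frame), which the 534/641 constants approximate, while the caller asks for however many samples its own output rate needs. location starts wantedSamples behind sample_offset_ and advances by wantedSamples / samples_per_frame per output sample, so the last output sample lands just behind the newest data, and & 0x3ff wraps the index into the ring. Below is a small standalone illustration of that index mapping, assuming a hypothetical host request of 800 output samples (48000 Hz at 60 fps).

// Illustration only: the source-index mapping used by Dsp::GetSamples for one
// NTSC frame, assuming a host request of 800 output samples.
#include <cstdio>

int main() {
  const double wanted_samples = 534.0;  // DSP frames produced per NTSC video frame
  const int samples_per_frame = 800;    // assumed host-side request
  const double adder = wanted_samples / samples_per_frame;  // ~0.67 source frames per output sample

  for (int i : {0, 200, 400, 600, 799}) {
    // GetSamples additionally wraps this index with `& 0x3ff` so it stays
    // inside the 1024-frame ring buffer.
    std::printf("output sample %3d reads source frame %3d\n", i,
                static_cast<int>(i * adder));
  }
  return 0;
}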

View File

@@ -12,9 +12,6 @@ namespace app {
namespace emu {
namespace audio {
using SampleFetcher = std::function<uint8_t(uint16_t)>;
using SamplePusher = std::function<void(int16_t)>;
/**
* The S-DSP is a digital signal processor generating the sound data.
*
@@ -49,8 +46,16 @@ using SamplePusher = std::function<void(int16_t)>;
* | | | | multiplied by ENVX, before applying VOL. |
*/
class DigitalSignalProcessor {
class Dsp {
public:
void Reset();
void GetSamples(int16_t* sample_data, int samples_per_frame, bool pal_timing);
private:
int16_t sample_buffer_[0x400 * 2]; // (1024 samples, *2 for stereo)
int16_t sample_offset_; // current offset in sample_buffer_
static const size_t kNumVoices = 8;
static const size_t kNumVoiceRegs = 10;
static const size_t kNumGlobalRegs = 15;
@@ -97,214 +102,6 @@ class DigitalSignalProcessor {
// Decay times in ms
const std::vector<uint32_t> decayTimes = {1200, 740, 440, 290,
180, 110, 74, 37};
// Release times in ms
const std::vector<uint32_t> releaseTimes = {
// "Infinite" is represented by a large value, e.g., UINT32_MAX
UINT32_MAX, 38000, 28000, 24000, 19000, 14000, 12000, 9400,
7100, 5900, 4700, 3500, 2900, 2400, 1800, 1500,
1200, 880, 740, 590, 440, 370, 290, 220,
180, 150, 110, 92, 74, 55, 37, 18};
// Gain timings for decrease linear, decrease exponential, etc.
// Organized by mode: [Linear Increase, Bentline Increase, Linear Decrease,
// Exponential Decrease]
const std::vector<std::vector<uint32_t>> gainTimings = {
{UINT32_MAX, 3100, 2600, 2000, 1500, 1300, 1000, 770, 640, 510, 380,
320, 260, 190, 160, 130, 96, 80, 64, 48, 40, 32,
24, 20, 16, 12, 10, 8, 6, 4, 2},
{UINT32_MAX, 5400, 4600, 3500, 2600, 2300, 1800, 1300, 1100, 900,
670, 560, 450, 340, 280, 220, 170, 140, 110, 84,
70, 56, 42, 35, 28, 21, 18, 14, 11, 7,
/*3.5=*/3},
// Repeating the Linear Increase timings for Linear Decrease, since they
// are the same.
{UINT32_MAX, 3100, 2600, 2000, 1500, 1300, 1000, 770, 640, 510, 380,
320, 260, 190, 160, 130, 96, 80, 64, 48, 40, 32,
24, 20, 16, 12, 10, 8, 6, 4, 2},
{UINT32_MAX, 38000, 28000, 24000, 19000, 14000, 12000, 9400,
7100, 5900, 4700, 3500, 2900, 2400, 1800, 1500,
1200, 880, 740, 590, 440, 370, 290, 220,
180, 150, 110, 92, 55, 37, 18}};
// DSP Period Table
const std::vector<std::vector<uint16_t>> DigitalSignalProcessorPeriodTable = {
// ... Your DSP period table here ...
};
// DSP Period Offset
const std::vector<uint16_t> DigitalSignalProcessorPeriodOffset = {
// ... Your DSP period offsets here ...
};
uint8_t calculate_envelope_value(uint16_t amplitude) const {
// Convert the 16-bit amplitude to an 8-bit envelope value
return amplitude >> 8;
}
void apply_envelope_to_output(uint8_t voice_num) {
Voice& voice = voices_[voice_num];
// Scale the OUTX by the envelope value
// This might be a linear scaling, or more complex operations can be used
voice.outx = (voice.outx * voice.envx) / 255;
}
SampleFetcher sample_fetcher_;
SamplePusher sample_pusher_;
public:
DigitalSignalProcessor() = default;
void Reset();
void SetSampleFetcher(std::function<uint8_t(uint16_t)> fetcher);
void SetSamplePusher(std::function<void(int16_t)> pusher);
// Read a byte from a voice register
uint8_t ReadVoiceReg(uint8_t voice, uint8_t reg) const;
// Write a byte to a voice register
void WriteVoiceReg(uint8_t voice, uint8_t reg, uint8_t value);
// Read a byte from a global register
uint8_t ReadGlobalReg(uint8_t reg) const {
return globalRegs[reg % kNumGlobalRegs];
}
// Write a byte to a global register
void WriteGlobalReg(uint8_t reg, uint8_t value) {
globalRegs[reg % kNumGlobalRegs] = value;
}
int16_t DecodeSample(uint8_t voice_num);
int16_t ProcessSample(uint8_t voice_num, int16_t sample);
void MixSamples();
// Trigger a voice to start playing
void trigger_voice(uint8_t voice_num) {
if (voice_num >= kNumVoices) return;
Voice& voice = voices_[voice_num];
voice.state = VoiceState::ATTACK;
// Initialize other state management variables if needed
}
// Release a voice (e.g., note release in ADSR)
void release_voice(uint8_t voice_num) {
if (voice_num >= kNumVoices) return;
Voice& voice = voices_[voice_num];
if (voice.state != VoiceState::OFF) {
voice.state = VoiceState::RELEASE;
}
// Update other state management variables if needed
}
// Calculate envelope for a given voice
void UpdateEnvelope(uint8_t voice);
// Voice-related functions (implementations)
void set_voice_volume(int voice_num, int8_t left, int8_t right) {
voices_[voice_num].vol_left = left;
voices_[voice_num].vol_right = right;
}
void set_voice_pitch(int voice_num, uint16_t pitch) {
voices_[voice_num].pitch_low = pitch & 0xFF;
voices_[voice_num].pitch_high = (pitch >> 8) & 0xFF;
}
void set_voice_source_number(int voice_num, uint8_t srcn) {
voices_[voice_num].source_number = srcn;
}
void set_voice_adsr(int voice_num, uint8_t adsr1, uint8_t adsr2) {
voices_[voice_num].adsr1 = adsr1;
voices_[voice_num].adsr2 = adsr2;
}
void set_voice_gain(int voice_num, uint8_t gain) {
voices_[voice_num].gain = gain;
}
uint8_t read_voice_envx(int voice_num) { return voices_[voice_num].envx; }
int8_t read_voice_outx(int voice_num) { return voices_[voice_num].outx; }
// Global DSP functions
void set_master_volume(int8_t left, int8_t right) {
mvol_left = left;
mvol_right = right;
}
void set_echo_volume(int8_t left, int8_t right) {
evol_left = left;
evol_right = right;
}
void update_voice_state(uint8_t voice_num);
// Override the key_on and key_off methods to utilize the new state management
void key_on(uint8_t value) {
for (uint8_t i = 0; i < kNumVoices; i++) {
if (value & (1 << i)) {
trigger_voice(i);
}
}
}
void key_off(uint8_t value) {
for (uint8_t i = 0; i < kNumVoices; i++) {
if (value & (1 << i)) {
release_voice(i);
}
}
}
void set_flags(uint8_t value) {
flags = value;
// More logic may be needed here depending on flag behaviors
}
uint8_t read_endx() { return endx; }
uint16_t AttackRate(uint8_t adsr1) {
// Convert the ATTACK portion of adsr1 into a rate of amplitude change
// You might need to adjust this logic based on the exact ADSR
// implementation details
return (adsr1 & 0x0F) * 16; // Just a hypothetical conversion
}
uint16_t DecayRate(uint8_t adsr2) {
// Convert the DECAY portion of adsr2 into a rate of amplitude change
return ((adsr2 >> 4) & 0x07) * 8; // Hypothetical conversion
}
uint16_t ReleaseRate(uint8_t adsr2) {
// Convert the RELEASE portion of adsr2 into a rate of amplitude change
return (adsr2 & 0x0F) * 16; // Hypothetical conversion
}
uint16_t CalculateDecayLevel(uint8_t adsr2) {
// Calculate the decay level based on the SUSTAIN portion of adsr2
// This is the level the amplitude will decay to before entering the SUSTAIN
// phase. Again, adjust based on your implementation details
return ((adsr2 >> 4) & 0x07) * 256; // Hypothetical conversion
}
// Envelope processing for all voices
// Goes through each voice and processes its envelope.
void process_envelopes() {
for (size_t i = 0; i < kNumVoices; ++i) {
process_envelope(i);
}
}
// Envelope processing for a specific voice
// For a given voice, update its state (ADSR), calculate the envelope value,
// and apply the envelope to the audio output.
void process_envelope(uint8_t voice_num);
};
} // namespace audio
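The new declarations in dsp.h are essentially Reset, GetSamples, and the 1024-frame stereo ring buffer; the producer side that fills sample_buffer_ does not appear in this commit. A hypothetical sketch of what a per-sample producer step could look like, written as a standalone struct so it compiles on its own; the MixVoices stub and all names are assumptions, not project code.

// Standalone sketch of the producer side of the ring buffer consumed by
// Dsp::GetSamples. The commit defines the buffer and offset but not how they
// are filled; MixVoices() here is a placeholder mixer, not project code.
#include <cstdint>

struct RingSketch {
  int16_t sample_buffer_[0x400 * 2] = {0};  // 1024 stereo frames, interleaved L/R
  int16_t sample_offset_ = 0;               // next frame slot to write

  void MixVoices(int16_t& left, int16_t& right) { left = 0; right = 0; }  // stub mixer

  // Called once per DSP output sample (32000 times per second).
  void ProduceSample() {
    int16_t left = 0, right = 0;
    MixVoices(left, right);
    sample_buffer_[sample_offset_ * 2] = left;
    sample_buffer_[sample_offset_ * 2 + 1] = right;
    sample_offset_ = (sample_offset_ + 1) & 0x3ff;  // wrap inside the ring
  }
};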