early-access version 1831

pineappleEA 2021-06-27 07:28:57 +02:00
parent 0826a8be7d
commit 9e3b1eea80
10 changed files with 165 additions and 146 deletions

View file

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
-This is the source code for early-access 1830.
+This is the source code for early-access 1831.
 ## Legal Notice

View file

@ -1,4 +1,4 @@
if (MINGW OR (${CMAKE_SYSTEM_NAME} MATCHES "Linux")) if (MINGW OR (${CMAKE_SYSTEM_NAME} MATCHES "Linux") OR APPLE)
set(LIBUSB_FOUND ON CACHE BOOL "libusb is present" FORCE) set(LIBUSB_FOUND ON CACHE BOOL "libusb is present" FORCE)
set(LIBUSB_VERSION "1.0.24" CACHE STRING "libusb version string" FORCE) set(LIBUSB_VERSION "1.0.24" CACHE STRING "libusb version string" FORCE)

View file

@@ -69,7 +69,7 @@ namespace {
 } // namespace
 namespace AudioCore {
-constexpr s32 NUM_BUFFERS = 2;
+constexpr s32 NUM_BUFFERS = 4;
 AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_,
                              AudioCommon::AudioRendererParameter params,
@@ -129,87 +129,85 @@ Stream::State AudioRenderer::GetStreamState() const {
 ResultCode AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
                                               std::vector<u8>& output_params) {
-    {
-        std::scoped_lock lock{mutex};
-        InfoUpdater info_updater{input_params, output_params, behavior_info};
+    std::scoped_lock lock{mutex};
+    InfoUpdater info_updater{input_params, output_params, behavior_info};
     if (!info_updater.UpdateBehaviorInfo(behavior_info)) {
         LOG_ERROR(Audio, "Failed to update behavior info input parameters");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     if (!info_updater.UpdateMemoryPools(memory_pool_info)) {
         LOG_ERROR(Audio, "Failed to update memory pool parameters");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     if (!info_updater.UpdateVoiceChannelResources(voice_context)) {
         LOG_ERROR(Audio, "Failed to update voice channel resource parameters");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     if (!info_updater.UpdateVoices(voice_context, memory_pool_info, 0)) {
         LOG_ERROR(Audio, "Failed to update voice parameters");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     // TODO(ogniK): Deal with stopped audio renderer but updates still taking place
     if (!info_updater.UpdateEffects(effect_context, true)) {
         LOG_ERROR(Audio, "Failed to update effect parameters");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     if (behavior_info.IsSplitterSupported()) {
         if (!info_updater.UpdateSplitterInfo(splitter_context)) {
             LOG_ERROR(Audio, "Failed to update splitter parameters");
             return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
         }
     }
-        const auto mix_result = info_updater.UpdateMixes(
-            mix_context, worker_params.mix_buffer_count, splitter_context, effect_context);
+    const auto mix_result = info_updater.UpdateMixes(mix_context, worker_params.mix_buffer_count,
+                                                     splitter_context, effect_context);
     if (mix_result.IsError()) {
         LOG_ERROR(Audio, "Failed to update mix parameters");
         return mix_result;
     }
     // TODO(ogniK): Sinks
     if (!info_updater.UpdateSinks(sink_context)) {
         LOG_ERROR(Audio, "Failed to update sink parameters");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     // TODO(ogniK): Performance buffer
     if (!info_updater.UpdatePerformanceBuffer()) {
         LOG_ERROR(Audio, "Failed to update performance buffer parameters");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     if (!info_updater.UpdateErrorInfo(behavior_info)) {
         LOG_ERROR(Audio, "Failed to update error info");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     if (behavior_info.IsElapsedFrameCountSupported()) {
         if (!info_updater.UpdateRendererInfo(elapsed_frame_count)) {
             LOG_ERROR(Audio, "Failed to update renderer info");
             return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
         }
     }
     // TODO(ogniK): Statistics
     if (!info_updater.WriteOutputHeader()) {
         LOG_ERROR(Audio, "Failed to write output header");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
     // TODO(ogniK): Check when all sections are implemented
     if (!info_updater.CheckConsumedSize()) {
         LOG_ERROR(Audio, "Audio buffers were not consumed!");
         return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
     }
-    }
     return ResultSuccess;
 }
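For context, the rewritten UpdateAudioRenderer holds the renderer mutex for the whole function instead of an inner scope block. A minimal sketch (not from the commit) of that pattern: a std::scoped_lock declared at function scope still releases the mutex on every early return.

// Minimal sketch (not yuzu code): locking for the full duration of a member
// function with std::scoped_lock instead of an inner scope block.
#include <mutex>
#include <vector>

class Renderer {
public:
    bool Update(const std::vector<unsigned char>& in, std::vector<unsigned char>& out) {
        std::scoped_lock lock{mutex}; // held until the function returns
        if (in.empty()) {
            return false; // early returns still release the mutex via RAII
        }
        out = in;
        return true;
    }

private:
    std::mutex mutex;
};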
@@ -234,10 +232,8 @@ void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
     command_generator.PostCommand();
     // Base sample size
     std::size_t BUFFER_SIZE{worker_params.sample_count};
-    // Samples
-    std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels());
-    // Make sure to clear our samples
-    std::memset(buffer.data(), 0, buffer.size() * sizeof(s16));
+    // Samples, making sure to clear
+    std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels(), 0);
     if (sink_context.InUse()) {
         const auto stream_channel_count = stream->GetNumChannels();
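For context, the dropped std::memset is redundant once the vector is constructed with an element count: those elements are value-initialized, and here an explicit 0 fill value is passed as well. A minimal standalone sketch, assuming s16 is std::int16_t:

// Minimal sketch (assumption: s16 is std::int16_t): a std::vector constructed
// with a count value-initializes its elements, so a separate memset is not needed.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    using s16 = std::int16_t;
    const std::size_t sample_count = 480;
    const std::size_t channels = 2;
    std::vector<s16> buffer(sample_count * channels, 0); // every element starts at 0
    assert(buffer.front() == 0 && buffer.back() == 0);
    return 0;
}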

View file

@@ -795,7 +795,7 @@ void CommandGenerator::UpdateI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbSta
         state.lowpass_1 = 0.0f;
     } else {
         const auto a = 1.0f - hf_gain;
-        const auto b = 2.0f * (1.0f - hf_gain * CosD(256.0f * info.hf_reference /
+        const auto b = 2.0f * (2.0f - hf_gain * CosD(256.0f * info.hf_reference /
                                                      static_cast<f32>(info.sample_rate)));
         const auto c = std::sqrt(b * b - 4.0f * a * a);
@@ -843,7 +843,7 @@ void CommandGenerator::UpdateI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbSta
     }
     const auto max_early_delay = state.early_delay_line.GetMaxDelay();
-    const auto reflection_time = 1000.0f * (0.0098f * info.reverb_delay + 0.02f);
+    const auto reflection_time = 1000.0f * (0.9998f * info.reverb_delay + 0.02f);
     for (std::size_t tap = 0; tap < AudioCommon::I3DL2REVERB_TAPS; tap++) {
         const auto length = AudioCommon::CalculateDelaySamples(
             sample_rate, 1000.0f * info.reflection_delay + reflection_time * EARLY_TAP_TIMES[tap]);
@@ -1004,7 +1004,8 @@ void CommandGenerator::GenerateFinalMixCommand() {
 }
 s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
-                                  s32 sample_count, s32 channel, std::size_t mix_offset) {
+                                  s32 sample_start_offset, s32 sample_end_offset, s32 sample_count,
+                                  s32 channel, std::size_t mix_offset) {
     const auto& in_params = voice_info.GetInParams();
     const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
     if (wave_buffer.buffer_address == 0) {
@@ -1013,14 +1014,12 @@ s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_s
     if (wave_buffer.buffer_size == 0) {
         return 0;
     }
-    if (wave_buffer.end_sample_offset < wave_buffer.start_sample_offset) {
+    if (sample_end_offset < sample_start_offset) {
         return 0;
     }
-    const auto samples_remaining =
-        (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
+    const auto samples_remaining = (sample_end_offset - sample_start_offset) - dsp_state.offset;
     const auto start_offset =
-        ((wave_buffer.start_sample_offset + dsp_state.offset) * in_params.channel_count) *
-        sizeof(s16);
+        ((dsp_state.offset + sample_start_offset) * in_params.channel_count) * sizeof(s16);
     const auto buffer_pos = wave_buffer.buffer_address + start_offset;
     const auto samples_processed = std::min(sample_count, samples_remaining);
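For context, DecodePcm16 now derives its read window from the sample offsets passed in by the caller; the byte position of the first interleaved frame to read is ((dsp_state.offset + sample_start_offset) * channel_count) * sizeof(s16). A minimal sketch of that arithmetic with hypothetical values:

// Minimal sketch (hypothetical values): byte offset of the next interleaved PCM16
// frame, mirroring ((dsp_state.offset + sample_start_offset) * channel_count) * sizeof(s16).
#include <cstdint>
#include <iostream>

int main() {
    const std::int32_t sample_start_offset = 100; // first playable sample in the buffer
    const std::int32_t decoded_so_far = 32;       // plays the role of dsp_state.offset
    const std::int32_t channel_count = 2;
    const auto start_offset =
        (decoded_so_far + sample_start_offset) * channel_count * sizeof(std::int16_t);
    std::cout << start_offset << '\n'; // (32 + 100) * 2 * 2 = 528 bytes into the buffer
    return 0;
}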
@@ -1044,8 +1043,8 @@ s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_s
 }
 s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
-                                  s32 sample_count, [[maybe_unused]] s32 channel,
-                                  std::size_t mix_offset) {
+                                  s32 sample_start_offset, s32 sample_end_offset, s32 sample_count,
+                                  [[maybe_unused]] s32 channel, std::size_t mix_offset) {
     const auto& in_params = voice_info.GetInParams();
     const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
     if (wave_buffer.buffer_address == 0) {
@@ -1054,7 +1053,7 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
     if (wave_buffer.buffer_size == 0) {
         return 0;
     }
-    if (wave_buffer.end_sample_offset < wave_buffer.start_sample_offset) {
+    if (sample_end_offset < sample_start_offset) {
         return 0;
     }
@@ -1079,10 +1078,9 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
     s32 coef1 = coeffs[idx * 2];
     s32 coef2 = coeffs[idx * 2 + 1];
-    const auto samples_remaining =
-        (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
+    const auto samples_remaining = (sample_end_offset - sample_start_offset) - dsp_state.offset;
     const auto samples_processed = std::min(sample_count, samples_remaining);
-    const auto sample_pos = wave_buffer.start_sample_offset + dsp_state.offset;
+    const auto sample_pos = dsp_state.offset + sample_start_offset;
     const auto samples_remaining_in_frame = sample_pos % SAMPLES_PER_FRAME;
     auto position_in_frame = ((sample_pos / SAMPLES_PER_FRAME) * NIBBLES_PER_SAMPLE) +
@@ -1210,9 +1208,8 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
     }
     std::size_t temp_mix_offset{};
-    bool is_buffer_completed{false};
     auto samples_remaining = sample_count;
-    while (samples_remaining > 0 && !is_buffer_completed) {
+    while (samples_remaining > 0) {
         const auto samples_to_output = std::min(samples_remaining, min_required_samples);
         const auto samples_to_read = (samples_to_output * resample_rate + dsp_state.fraction) >> 15;
@@ -1229,7 +1226,6 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
         const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
         // No more data can be read
         if (!dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index]) {
-            is_buffer_completed = true;
             break;
         }
@@ -1238,15 +1234,29 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
             // TODO(ogniK): ADPCM loop context
         }
+        s32 samples_offset_start;
+        s32 samples_offset_end;
+        if (dsp_state.loop_count > 0 ||
+            (wave_buffer.loop_start_sample != 0 && wave_buffer.loop_end_sample != 0 &&
+             wave_buffer.loop_start_sample <= wave_buffer.loop_end_sample)) {
+            samples_offset_start = wave_buffer.loop_start_sample;
+            samples_offset_end = wave_buffer.loop_end_sample;
+        } else {
+            samples_offset_start = wave_buffer.start_sample_offset;
+            samples_offset_end = wave_buffer.end_sample_offset;
+        }
         s32 samples_decoded{0};
         switch (in_params.sample_format) {
         case SampleFormat::Pcm16:
-            samples_decoded = DecodePcm16(voice_info, dsp_state, samples_to_read - samples_read,
-                                          channel, temp_mix_offset);
+            samples_decoded =
+                DecodePcm16(voice_info, dsp_state, samples_offset_start, samples_offset_end,
+                            samples_to_read - samples_read, channel, temp_mix_offset);
             break;
         case SampleFormat::Adpcm:
-            samples_decoded = DecodeAdpcm(voice_info, dsp_state, samples_to_read - samples_read,
-                                          channel, temp_mix_offset);
+            samples_decoded =
+                DecodeAdpcm(voice_info, dsp_state, samples_offset_start, samples_offset_end,
+                            samples_to_read - samples_read, channel, temp_mix_offset);
             break;
         default:
             UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
@@ -1257,15 +1267,19 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
         dsp_state.offset += samples_decoded;
         dsp_state.played_sample_count += samples_decoded;
-        if (dsp_state.offset >=
-                (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) ||
+        if (dsp_state.offset >= (samples_offset_end - samples_offset_start) ||
             samples_decoded == 0) {
             // Reset our sample offset
             dsp_state.offset = 0;
             if (wave_buffer.is_looping) {
-                if (samples_decoded == 0) {
-                    // End of our buffer
-                    is_buffer_completed = true;
+                dsp_state.loop_count++;
+                if (wave_buffer.loop_count > 0 &&
+                    (dsp_state.loop_count > wave_buffer.loop_count || samples_decoded == 0)) {
+                    // End of our buffer
+                    voice_info.SetWaveBufferCompleted(dsp_state, wave_buffer);
+                }
+                if (samples_decoded == 0) {
                     break;
                 }
@@ -1273,15 +1287,8 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
                     dsp_state.played_sample_count = 0;
                 }
             } else {
                 // Update our wave buffer states
-                dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
-                dsp_state.wave_buffer_consumed++;
-                dsp_state.wave_buffer_index =
-                    (dsp_state.wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
-                if (wave_buffer.end_of_stream) {
-                    dsp_state.played_sample_count = 0;
-                }
+                voice_info.SetWaveBufferCompleted(dsp_state, wave_buffer);
             }
         }
     }
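For context, the new samples_offset_start/samples_offset_end selection means decoding switches to the loop window once a buffer has looped at least once, or when valid loop points are supplied; otherwise it keeps using the buffer's start and end sample offsets. A minimal standalone sketch of that selection (hypothetical struct, mirroring the fields shown in the diff):

// Minimal sketch (hypothetical standalone types mirroring the diff): pick the sample
// window to decode from. After the first pass of a looping buffer, or when valid loop
// points exist, the loop window replaces the buffer's start/end offsets.
#include <cstdint>
#include <iostream>
#include <utility>

struct WaveBufferInfo {
    std::int32_t start_sample_offset;
    std::int32_t end_sample_offset;
    std::uint32_t loop_start_sample;
    std::uint32_t loop_end_sample;
};

std::pair<std::int32_t, std::int32_t> SelectWindow(const WaveBufferInfo& wb,
                                                   std::int32_t loop_count) {
    const bool has_loop_points = wb.loop_start_sample != 0 && wb.loop_end_sample != 0 &&
                                 wb.loop_start_sample <= wb.loop_end_sample;
    if (loop_count > 0 || has_loop_points) {
        return {static_cast<std::int32_t>(wb.loop_start_sample),
                static_cast<std::int32_t>(wb.loop_end_sample)};
    }
    return {wb.start_sample_offset, wb.end_sample_offset};
}

int main() {
    const WaveBufferInfo wb{0, 48000, 12000, 36000};
    const auto [start, end] = SelectWindow(wb, 1); // already looped once -> loop window
    std::cout << start << ".." << end << '\n';     // prints 12000..36000
    return 0;
}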

View file

@@ -86,10 +86,10 @@ private:
                               std::vector<u8>& work_buffer);
     void UpdateI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbState& state, bool should_clear);
     // DSP Code
-    s32 DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_count,
-                    s32 channel, std::size_t mix_offset);
-    s32 DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_count,
-                    s32 channel, std::size_t mix_offset);
+    s32 DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_start_offset,
+                    s32 sample_end_offset, s32 sample_count, s32 channel, std::size_t mix_offset);
+    s32 DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_start_offset,
+                    s32 sample_end_offset, s32 sample_count, s32 channel, std::size_t mix_offset);
     void DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* output, VoiceState& dsp_state,
                                s32 channel, s32 target_sample_rate, s32 sample_count, s32 node_id);

View file

@@ -189,9 +189,6 @@ bool InfoUpdater::UpdateVoices(VoiceContext& voice_context,
         if (voice_in_params.is_new) {
             // Default our values for our voice
             voice_info.Initialize();
-            if (channel_count == 0 || channel_count > AudioCommon::MAX_CHANNEL_COUNT) {
-                continue;
-            }
             // Zero out our voice states
             for (std::size_t channel = 0; channel < channel_count; channel++) {

View file

@@ -66,7 +66,7 @@ void ServerVoiceInfo::Initialize() {
     in_params.last_volume = 0.0f;
     in_params.biquad_filter.fill({});
     in_params.wave_buffer_count = 0;
-    in_params.wave_bufffer_head = 0;
+    in_params.wave_buffer_head = 0;
     in_params.mix_id = AudioCommon::NO_MIX;
     in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
     in_params.additional_params_address = 0;
@@ -75,7 +75,7 @@ void ServerVoiceInfo::Initialize() {
     out_params.played_sample_count = 0;
     out_params.wave_buffer_consumed = 0;
     in_params.voice_drop_flag = false;
-    in_params.buffer_mapped = false;
+    in_params.buffer_mapped = true;
     in_params.wave_buffer_flush_request_count = 0;
     in_params.was_biquad_filter_enabled.fill(false);
@@ -126,7 +126,7 @@ void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
     in_params.volume = voice_in.volume;
     in_params.biquad_filter = voice_in.biquad_filter;
     in_params.wave_buffer_count = voice_in.wave_buffer_count;
-    in_params.wave_bufffer_head = voice_in.wave_buffer_head;
+    in_params.wave_buffer_head = voice_in.wave_buffer_head;
     if (behavior_info.IsFlushVoiceWaveBuffersSupported()) {
         const auto in_request_count = in_params.wave_buffer_flush_request_count;
         const auto voice_request_count = voice_in.wave_buffer_flush_request_count;
@@ -185,14 +185,16 @@ void ServerVoiceInfo::UpdateWaveBuffers(
             wave_buffer.buffer_size = 0;
             wave_buffer.context_address = 0;
             wave_buffer.context_size = 0;
+            wave_buffer.loop_start_sample = 0;
+            wave_buffer.loop_end_sample = 0;
             wave_buffer.sent_to_dsp = true;
         }
         // Mark all our wave buffers as invalid
         for (std::size_t channel = 0; channel < static_cast<std::size_t>(in_params.channel_count);
             channel++) {
-            for (auto& is_valid : voice_states[channel]->is_wave_buffer_valid) {
-                is_valid = false;
+            for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; ++i) {
+                voice_states[channel]->is_wave_buffer_valid[i] = false;
             }
         }
     }
@@ -211,7 +213,7 @@ void ServerVoiceInfo::UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer,
                                        const WaveBuffer& in_wave_buffer, SampleFormat sample_format,
                                        bool is_buffer_valid,
                                        [[maybe_unused]] BehaviorInfo& behavior_info) {
-    if (!is_buffer_valid && out_wavebuffer.sent_to_dsp) {
+    if (!is_buffer_valid && out_wavebuffer.sent_to_dsp && out_wavebuffer.buffer_address != 0) {
         out_wavebuffer.buffer_address = 0;
         out_wavebuffer.buffer_size = 0;
     }
@@ -239,6 +241,8 @@ void ServerVoiceInfo::UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer,
         out_wavebuffer.buffer_size = in_wave_buffer.buffer_size;
         out_wavebuffer.context_address = in_wave_buffer.context_address;
         out_wavebuffer.context_size = in_wave_buffer.context_size;
+        out_wavebuffer.loop_start_sample = in_wave_buffer.loop_start_sample;
+        out_wavebuffer.loop_end_sample = in_wave_buffer.loop_end_sample;
         in_params.buffer_mapped =
             in_wave_buffer.buffer_address != 0 && in_wave_buffer.buffer_size != 0;
         // TODO(ogniK): Pool mapper attachment
@@ -249,19 +253,16 @@ void ServerVoiceInfo::UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer,
 void ServerVoiceInfo::WriteOutStatus(
     VoiceInfo::OutParams& voice_out, VoiceInfo::InParams& voice_in,
     std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states) {
-    if (voice_in.is_new) {
+    if (voice_in.is_new || in_params.is_new) {
         in_params.is_new = true;
         voice_out.wave_buffer_consumed = 0;
         voice_out.played_sample_count = 0;
         voice_out.voice_dropped = false;
-    } else if (!in_params.is_new) {
-        voice_out.wave_buffer_consumed = voice_states[0]->wave_buffer_consumed;
-        voice_out.played_sample_count = voice_states[0]->played_sample_count;
-        voice_out.voice_dropped = in_params.voice_drop_flag;
     } else {
-        voice_out.wave_buffer_consumed = 0;
-        voice_out.played_sample_count = 0;
-        voice_out.voice_dropped = false;
+        const auto& state = voice_states[0];
+        voice_out.wave_buffer_consumed = state->wave_buffer_consumed;
+        voice_out.played_sample_count = state->played_sample_count;
+        voice_out.voice_dropped = state->voice_dropped;
     }
 }
@@ -283,7 +284,8 @@ ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() {
 bool ServerVoiceInfo::ShouldSkip() const {
     // TODO(ogniK): Handle unmapped wave buffers or parameters
-    return !in_params.in_use || (in_params.wave_buffer_count == 0) || in_params.voice_drop_flag;
+    return !in_params.in_use || in_params.wave_buffer_count == 0 || !in_params.buffer_mapped ||
+           in_params.voice_drop_flag;
 }
 bool ServerVoiceInfo::UpdateForCommandGeneration(VoiceContext& voice_context) {
@@ -381,7 +383,7 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
 void ServerVoiceInfo::FlushWaveBuffers(
     u8 flush_count, std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
     s32 channel_count) {
-    auto wave_head = in_params.wave_bufffer_head;
+    auto wave_head = in_params.wave_buffer_head;
     for (u8 i = 0; i < flush_count; i++) {
         in_params.wave_buffer[wave_head].sent_to_dsp = true;
@@ -401,6 +403,17 @@ bool ServerVoiceInfo::HasValidWaveBuffer(const VoiceState* state) const {
     return std::find(valid_wb.begin(), valid_wb.end(), true) != valid_wb.end();
 }
+void ServerVoiceInfo::SetWaveBufferCompleted(VoiceState& dsp_state,
+                                             const ServerWaveBuffer& wave_buffer) {
+    dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
+    dsp_state.wave_buffer_consumed++;
+    dsp_state.wave_buffer_index = (dsp_state.wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
+    dsp_state.loop_count = 0;
+    if (wave_buffer.end_of_stream) {
+        dsp_state.played_sample_count = 0;
+    }
+}
 VoiceContext::VoiceContext(std::size_t voice_count_) : voice_count{voice_count_} {
     for (std::size_t i = 0; i < voice_count; i++) {
         voice_channel_resources.emplace_back(static_cast<s32>(i));
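For context, SetWaveBufferCompleted centralizes what both decode paths previously open-coded: invalidate the current wave buffer, count it as consumed, advance the ring-buffer index, and reset the loop counter. A minimal sketch of the index advance (MAX_WAVE_BUFFERS shown as 4 purely for illustration):

// Minimal sketch (hypothetical constant; 4 is used only for illustration): advancing a
// ring-buffer index and counting consumed buffers, as the new helper does.
#include <cstddef>
#include <iostream>

int main() {
    constexpr std::size_t max_wave_buffers = 4;
    std::size_t index = 3;    // currently playing wave buffer
    std::size_t consumed = 0; // buffers reported back to the guest

    ++consumed;
    index = (index + 1) % max_wave_buffers; // wraps 3 -> 0

    std::cout << "index=" << index << " consumed=" << consumed << '\n';
    return 0;
}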

View file

@@ -60,10 +60,12 @@ struct WaveBuffer {
     u8 is_looping{};
     u8 end_of_stream{};
     u8 sent_to_server{};
-    INSERT_PADDING_BYTES(5);
+    INSERT_PADDING_BYTES(1);
+    s32 loop_count{};
     u64 context_address{};
     u64 context_size{};
-    INSERT_PADDING_BYTES(8);
+    u32 loop_start_sample{};
+    u32 loop_end_sample{};
 };
 static_assert(sizeof(WaveBuffer) == 0x38, "WaveBuffer is an invalid size");
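For context, the static_assert still holds because the new loop fields replace former padding: one padding byte plus a 32-bit loop_count stand in for the old 5 padding bytes, and the two 32-bit loop sample fields replace the old 8 trailing padding bytes, keeping the struct at 0x38 bytes. A standalone approximation of the layout (assuming the fields before is_looping are the 64-bit address/size pair and the two 32-bit sample offsets referenced elsewhere in the diff; real code uses the u64/s32/u8 typedefs and INSERT_PADDING_BYTES):

// Minimal sketch (approximation of the layout, not the actual yuzu struct): the new
// loop fields slot into former padding, so the total size stays 0x38 bytes.
#include <cstdint>

struct WaveBufferLayout {
    std::uint64_t buffer_address;
    std::uint64_t buffer_size;
    std::int32_t start_sample_offset;
    std::int32_t end_sample_offset;
    std::uint8_t is_looping;
    std::uint8_t end_of_stream;
    std::uint8_t sent_to_server;
    std::uint8_t padding;
    std::int32_t loop_count;
    std::uint64_t context_address;
    std::uint64_t context_size;
    std::uint32_t loop_start_sample;
    std::uint32_t loop_end_sample;
};
static_assert(sizeof(WaveBufferLayout) == 0x38, "layout no longer matches the DSP struct");

int main() {
    return 0;
}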
@@ -76,6 +78,9 @@ struct ServerWaveBuffer {
     bool end_of_stream{};
     VAddr context_address{};
     std::size_t context_size{};
+    s32 loop_count{};
+    u32 loop_start_sample{};
+    u32 loop_end_sample{};
     bool sent_to_dsp{true};
 };
@@ -108,6 +113,7 @@ struct VoiceState {
     u32 external_context_size;
     bool is_external_context_used;
     bool voice_dropped;
+    s32 loop_count;
 };
 class VoiceChannelResource {
@@ -206,7 +212,7 @@ public:
     float last_volume{};
     std::array<BiquadFilterParameter, AudioCommon::MAX_BIQUAD_FILTERS> biquad_filter{};
     s32 wave_buffer_count{};
-    s16 wave_bufffer_head{};
+    s16 wave_buffer_head{};
     INSERT_PADDING_BYTES(2);
     BehaviorFlags behavior_flags{};
     VAddr additional_params_address{};
@@ -252,6 +258,7 @@ public:
     void FlushWaveBuffers(u8 flush_count,
                           std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
                           s32 channel_count);
+    void SetWaveBufferCompleted(VoiceState& dsp_state, const ServerWaveBuffer& wave_buffer);
 private:
     std::vector<s16> stored_samples;

View file

@@ -103,9 +103,8 @@ void Adapter::AdapterInputThread(std::stop_token stop_token) {
     AdapterPayload adapter_payload{};
     adapter_scan_thread = {};
-    adapter_input_thread_running = true;
-    while (adapter_input_thread_running && !stop_token.stop_requested()) {
+    while (!stop_token.stop_requested()) {
         libusb_interrupt_transfer(usb_adapter_handle->get(), input_endpoint, adapter_payload.data(),
                                   static_cast<s32>(adapter_payload.size()), &payload_size, 16);
         if (IsPayloadCorrect(adapter_payload, payload_size)) {
@@ -129,7 +128,7 @@ bool Adapter::IsPayloadCorrect(const AdapterPayload& adapter_payload, s32 payloa
                   adapter_payload[0]);
         if (input_error_counter++ > 20) {
             LOG_ERROR(Input, "GC adapter timeout, Is the adapter connected?");
-            adapter_input_thread_running = false;
+            adapter_input_thread.request_stop();
             restart_scan_thread = true;
         }
         return false;
@@ -303,14 +302,15 @@ void Adapter::AdapterScanThread(std::stop_token stop_token) {
     usb_adapter_handle = nullptr;
     pads = {};
     while (!stop_token.stop_requested() && !Setup()) {
-        std::this_thread::sleep_for(std::chrono::seconds(1));
+        std::this_thread::sleep_for(std::chrono::seconds(2));
     }
 }
 bool Adapter::Setup() {
-    constexpr u16 vid = 0x057e; // Nintendo
-    constexpr u16 pid = 0x0337; // GC Adapter
-    usb_adapter_handle = std::make_unique<LibUSBDeviceHandle>(libusb_ctx->get(), vid, pid);
+    constexpr u16 nintendo_vid = 0x057e;
+    constexpr u16 gc_adapter_pid = 0x0337;
+    usb_adapter_handle =
+        std::make_unique<LibUSBDeviceHandle>(libusb_ctx->get(), nintendo_vid, gc_adapter_pid);
     if (!usb_adapter_handle->get()) {
         return false;
     }
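For context, dropping the hand-rolled adapter_input_thread_running flag leans on std::jthread's cooperative cancellation: request_stop() flips the stop_token that the input loop already polls. A minimal standard C++20 sketch (not yuzu code):

// Minimal sketch (standard C++20): a std::jthread polls its stop_token instead of a
// hand-rolled "running" flag; request_stop() ends the loop, and destruction would
// request stop and join automatically as well.
#include <chrono>
#include <iostream>
#include <stop_token>
#include <thread>

int main() {
    std::jthread worker([](std::stop_token stop_token) {
        while (!stop_token.stop_requested()) {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
        std::cout << "worker stopped\n";
    });

    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    worker.request_stop(); // plays the role of the removed adapter_input_thread_running = false
    return 0;
}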

View file

@@ -156,6 +156,5 @@ private:
     bool configuring{false};
     bool rumble_enabled{true};
     bool vibration_changed{true};
-    bool adapter_input_thread_running{false};
 };
 } // namespace GCAdapter