early-access version 3690

This commit is contained in:
pineappleEA 2023-06-17 11:47:19 +02:00
parent 8bd9518b52
commit ec90002758
6 changed files with 55 additions and 37 deletions

View file

@@ -1,7 +1,7 @@
yuzu emulator early access
=============
This is the source code for early-access 3689.
This is the source code for early-access 3690.
## Legal Notice

View file

@@ -11,16 +11,16 @@ find_program(GNU_DATE date)
set(CAN_BUILD_NX_TZDB true)
if ("${GIT}" STREQUAL "GIT-NOTFOUND")
if (NOT GIT)
set(CAN_BUILD_NX_TZDB false)
endif()
if ("${GNU_MAKE}" STREQUAL "GNU_MAKE-NOTFOUND")
if (NOT GNU_MAKE)
set(CAN_BUILD_NX_TZDB false)
endif()
if ("${GNU_DATE}" STREQUAL "GNU_DATE-NOTFOUND")
if (NOT GNU_DATE)
set(CAN_BUILD_NX_TZDB false)
endif()
if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Windows" OR ANDROID)
if (CMAKE_SYSTEM_NAME STREQUAL "Windows" OR ANDROID)
# tzdb_to_nx currently requires a posix-compliant host
# MinGW and Android are handled here due to the executable format being different from the host system
# TODO (lat9nq): cross-compiling support
@@ -32,14 +32,14 @@ set(NX_TZDB_ARCHIVE "${CMAKE_CURRENT_BINARY_DIR}/${NX_TZDB_VERSION}.zip")
set(NX_TZDB_ROMFS_DIR "${CMAKE_CURRENT_BINARY_DIR}/nx_tzdb")
if ((NOT ${CAN_BUILD_NX_TZDB} OR ${YUZU_DOWNLOAD_TIME_ZONE_DATA}) AND NOT EXISTS ${NX_TZDB_ARCHIVE})
if ((NOT CAN_BUILD_NX_TZDB OR YUZU_DOWNLOAD_TIME_ZONE_DATA) AND NOT EXISTS ${NX_TZDB_ARCHIVE})
set(NX_TZDB_DOWNLOAD_URL "https://github.com/lat9nq/tzdb_to_nx/releases/download/${NX_TZDB_VERSION}/${NX_TZDB_VERSION}.zip")
message(STATUS "Downloading time zone data from ${NX_TZDB_DOWNLOAD_URL}...")
file(DOWNLOAD ${NX_TZDB_DOWNLOAD_URL} ${NX_TZDB_ARCHIVE}
STATUS NX_TZDB_DOWNLOAD_STATUS)
list(GET NX_TZDB_DOWNLOAD_STATUS 0 NX_TZDB_DOWNLOAD_STATUS_CODE)
if (NOT "${NX_TZDB_DOWNLOAD_STATUS_CODE}" STREQUAL "0")
if (NOT NX_TZDB_DOWNLOAD_STATUS_CODE EQUAL 0)
message(FATAL_ERROR "Time zone data download failed (status code ${NX_TZDB_DOWNLOAD_STATUS_CODE})")
endif()
@@ -48,7 +48,7 @@ if ((NOT ${CAN_BUILD_NX_TZDB} OR ${YUZU_DOWNLOAD_TIME_ZONE_DATA}) AND NOT EXISTS
${NX_TZDB_ARCHIVE}
DESTINATION
${NX_TZDB_ROMFS_DIR})
elseif (${CAN_BUILD_NX_TZDB} AND NOT ${YUZU_DOWNLOAD_TIME_ZONE_DATA})
elseif (CAN_BUILD_NX_TZDB AND NOT YUZU_DOWNLOAD_TIME_ZONE_DATA)
add_subdirectory(tzdb_to_nx)
add_dependencies(nx_tzdb x80e)

View file

@@ -15,7 +15,7 @@ set(DIRECTORY_NAME ${HEADER_NAME})
set(FILE_DATA "")
foreach(ZONE_FILE ${FILE_LIST})
if ("${ZONE_FILE}" STREQUAL "\n")
if (ZONE_FILE STREQUAL "\n")
continue()
endif()
@@ -26,13 +26,13 @@ foreach(ZONE_FILE ${FILE_LIST})
foreach(I RANGE 0 ${ZONE_DATA_LEN} 2)
math(EXPR BREAK_LINE "(${I} + 2) % 38")
string(SUBSTRING "${ZONE_DATA}" "${I}" "2" HEX_DATA)
if ("${HEX_DATA}" STREQUAL "")
string(SUBSTRING "${ZONE_DATA}" "${I}" 2 HEX_DATA)
if (NOT HEX_DATA)
break()
endif()
string(APPEND FILE_DATA "0x${HEX_DATA},")
if ("${BREAK_LINE}" STREQUAL "0")
if (BREAK_LINE EQUAL 0)
string(APPEND FILE_DATA "\n")
else()
string(APPEND FILE_DATA " ")
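Note on the hunk above: the loop walks the zone file's hex dump two characters (one byte) at a time, emits `0x${HEX_DATA},` tokens, and breaks the line after every 19 bytes (the `BREAK_LINE = (I + 2) % 38` check counts hex characters). A rough standalone C++ sketch of the same formatting, with illustrative names only, not part of the build scripts:

```cpp
// Render each byte as "0xNN," and insert a newline after every 19 bytes,
// mirroring the CMake loop's BREAK_LINE check over hex characters.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

std::string FormatAsInitializer(const std::vector<std::uint8_t>& data) {
    std::string out;
    for (std::size_t i = 0; i < data.size(); ++i) {
        char token[8];
        std::snprintf(token, sizeof(token), "0x%02x,", static_cast<unsigned>(data[i]));
        out += token;
        out += ((i + 1) % 19 == 0) ? "\n" : " ";
    }
    return out;
}

int main() {
    const std::vector<std::uint8_t> sample(40, 0xab);
    std::printf("%s\n", FormatAsInitializer(sample).c_str());
}
```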

View file

@@ -716,6 +716,7 @@ void BufferCache<P>::BindHostIndexBuffer() {
template <class P>
void BufferCache<P>::BindHostVertexBuffers() {
HostBindings<typename P::Buffer> host_bindings;
bool any_valid{false};
auto& flags = maxwell3d->dirty.flags;
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
const Binding& binding = channel_state->vertex_buffers[index];
@@ -727,17 +728,27 @@
}
flags[Dirty::VertexBuffer0 + index] = false;
const u32 stride = maxwell3d->regs.vertex_streams[index].stride;
const u32 offset = buffer.Offset(binding.cpu_addr);
host_bindings.buffers.push_back(&buffer);
host_bindings.offsets.push_back(offset);
host_bindings.sizes.push_back(binding.size);
host_bindings.strides.push_back(stride);
host_bindings.min_index = std::min(host_bindings.min_index, index);
host_bindings.count++;
host_bindings.max_index = std::max(host_bindings.max_index, index);
any_valid = true;
}
if (host_bindings.count > 0) {
if (any_valid) {
host_bindings.max_index++;
for (u32 index = host_bindings.min_index; index < host_bindings.max_index; index++) {
flags[Dirty::VertexBuffer0 + index] = false;
const Binding& binding = channel_state->vertex_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
const u32 stride = maxwell3d->regs.vertex_streams[index].stride;
const u32 offset = buffer.Offset(binding.cpu_addr);
host_bindings.buffers.push_back(&buffer);
host_bindings.offsets.push_back(offset);
host_bindings.sizes.push_back(binding.size);
host_bindings.strides.push_back(stride);
}
runtime.BindVertexBuffers(host_bindings);
}
}
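The reworked BindHostVertexBuffers above no longer counts bindings; it records the lowest and highest dirty slot, then re-walks that contiguous range so the backend receives one span (slots in between are rebound even if they were not dirty). A minimal sketch of the same two-pass idea, using simplified stand-in types rather than the cache's real Buffer/Binding objects:

```cpp
// First pass: record only the min/max dirty slot. Second pass: collect every slot
// in the contiguous [min_index, max_index) range for a single backend bind call.
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

struct RangeBindings {
    std::uint32_t min_index = 32; // sentinel, NUM_VERTEX_BUFFERS-style
    std::uint32_t max_index = 0;
    std::vector<std::uint32_t> slots; // stand-in for buffers/offsets/sizes/strides
};

std::optional<RangeBindings> CollectDirtyRange(const std::vector<bool>& dirty) {
    RangeBindings bindings;
    bool any_valid = false;
    for (std::uint32_t index = 0; index < dirty.size(); ++index) {
        if (!dirty[index]) {
            continue;
        }
        bindings.min_index = std::min(bindings.min_index, index);
        bindings.max_index = std::max(bindings.max_index, index);
        any_valid = true;
    }
    if (!any_valid) {
        return std::nullopt; // nothing to bind, mirrors the any_valid guard
    }
    ++bindings.max_index; // one past the last dirty slot
    for (std::uint32_t index = bindings.min_index; index < bindings.max_index; ++index) {
        bindings.slots.push_back(index);
    }
    return bindings;
}
```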
@@ -790,7 +801,8 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
!HasFastUniformBufferBound(stage, binding_index) ||
channel_state->uniform_buffer_binding_sizes[stage][binding_index] != size;
if (should_fast_bind) {
// We only have to bind when the currently bound buffer is not the fast version
// We only have to bind when the currently bound buffer is not the fast
// version
channel_state->fast_bound_uniform_buffers[stage] |= 1U << binding_index;
channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
runtime.BindFastUniformBuffer(stage, binding_index, size);
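For context on the rewrapped comment above: the cache keeps one bit per binding slot to remember that the fast uniform buffer is already bound there, plus the size it was bound with, and only issues a rebind when either differs. A small sketch of that bookkeeping; member names and the slot count are illustrative, not the cache's actual layout:

```cpp
// One bit per slot says the fast uniform buffer is already bound; a rebind is only
// needed when the bit is clear or the recorded size changed.
#include <array>
#include <cstddef>
#include <cstdint>

struct FastUniformState {
    static constexpr std::size_t kSlots = 32; // illustrative capacity
    std::uint32_t fast_bound_mask = 0;
    std::array<std::uint32_t, kSlots> bound_sizes{};

    bool ShouldFastBind(std::uint32_t binding_index, std::uint32_t size) const {
        const bool already_fast_bound = (fast_bound_mask >> binding_index) & 1U;
        return !already_fast_bound || bound_sizes[binding_index] != size;
    }

    void MarkFastBound(std::uint32_t binding_index, std::uint32_t size) {
        fast_bound_mask |= 1U << binding_index; // mirrors the |= in the hunk above
        bound_sizes[binding_index] = size;
    }
};
```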
@@ -893,6 +905,7 @@ void BufferCache<P>::BindHostTransformFeedbackBuffers() {
return;
}
HostBindings<typename P::Buffer> host_bindings;
bool any_valid{false};
for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
const Binding& binding = channel_state->transform_feedback_buffers[index];
if (maxwell3d->regs.transform_feedback.controls[index].varying_count == 0 &&
@@ -908,9 +921,12 @@ void BufferCache<P>::BindHostTransformFeedbackBuffers() {
host_bindings.buffers.push_back(&buffer);
host_bindings.offsets.push_back(offset);
host_bindings.sizes.push_back(binding.size);
host_bindings.count++;
host_bindings.min_index = std::min(host_bindings.min_index, index);
host_bindings.max_index = std::max(host_bindings.max_index, index);
any_valid = true;
}
if (host_bindings.count > 0) {
if (any_valid) {
host_bindings.max_index++;
runtime.BindTransformFeedbackBuffers(host_bindings);
}
}
@@ -1306,8 +1322,8 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
}
stream_score += overlap.StreamScore();
if (stream_score > STREAM_LEAP_THRESHOLD && !has_stream_leap) {
// When this memory region has been joined a bunch of times, we assume it's being used
// as a stream buffer. Increase the size to skip constantly recreating buffers.
// When this memory region has been joined a bunch of times, we assume it's being
// used as a stream buffer. Increase the size to skip constantly recreating buffers.
has_stream_leap = true;
if (expands_right) {
begin -= CACHING_PAGESIZE * 256;
@@ -1701,16 +1717,17 @@ Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index,
const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr);
const auto size = [&]() {
const bool is_nvn_cbuf = cbuf_index == 0;
// The NVN driver buffer (index 0) is known to pack the SSBO address followed by its size.
// The NVN driver buffer (index 0) is known to pack the SSBO address followed by its
// size.
if (is_nvn_cbuf) {
const u32 ssbo_size = gpu_memory->Read<u32>(ssbo_addr + 8);
if (ssbo_size != 0) {
return ssbo_size;
}
}
// Other titles (notably Doom Eternal) may use STG/LDG on buffer addresses in custom defined
// cbufs, which do not store the sizes adjacent to the addresses, so use the fully
// mapped buffer size for now.
// Other titles (notably Doom Eternal) may use STG/LDG on buffer addresses in custom
// defined cbufs, which do not store the sizes adjacent to the addresses, so use the
// fully mapped buffer size for now.
const u32 memory_layout_size = static_cast<u32>(gpu_memory->GetMemoryLayoutSize(gpu_addr));
return std::min(memory_layout_size, static_cast<u32>(8_MiB));
}();
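To summarize the size lambda above: the NVN driver cbuf (index 0) packs each SSBO's 32-bit size right after its 8-byte address, so it is read from `ssbo_addr + 8` and used when non-zero; otherwise the fully mapped size is used, clamped to 8 MiB. A condensed sketch of that lookup with stand-in accessors (`read_u32`/`mapped_size` are not the real `gpu_memory` API):

```cpp
// Prefer the size packed after the address in the NVN cbuf; otherwise clamp the
// fully mapped size, since custom cbufs carry no adjacent size.
#include <algorithm>
#include <cstdint>
#include <functional>

constexpr std::uint32_t kFallbackCap = 8 * 1024 * 1024; // the 8 MiB clamp above

std::uint32_t ResolveSsboSize(std::uint64_t ssbo_addr, std::uint64_t gpu_addr, bool is_nvn_cbuf,
                              const std::function<std::uint32_t(std::uint64_t)>& read_u32,
                              const std::function<std::uint32_t(std::uint64_t)>& mapped_size) {
    if (is_nvn_cbuf) {
        const std::uint32_t ssbo_size = read_u32(ssbo_addr + 8); // size follows the address
        if (ssbo_size != 0) {
            return ssbo_size;
        }
    }
    return std::min(mapped_size(gpu_addr), kFallbackCap);
}
```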

View file

@@ -112,7 +112,7 @@ struct HostBindings {
boost::container::small_vector<u64, NUM_VERTEX_BUFFERS> sizes;
boost::container::small_vector<u64, NUM_VERTEX_BUFFERS> strides;
u32 min_index{NUM_VERTEX_BUFFERS};
u32 count{0};
u32 max_index{0};
};
class BufferCacheChannelInfo : public ChannelInfo {
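HostBindings now carries max_index in place of count; the buffer cache hunks above bump max_index one past the last used slot, and the runtime derives the binding count as max_index - min_index. A tiny worked example of that arithmetic, under the assumption of dirty slots {2, 5}:

```cpp
// With dirty slots {2, 5}: min_index = 2, max_index = 5 + 1 = 6,
// so the runtime binds max_index - min_index = 4 contiguous slots.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
    std::uint32_t min_index = 32; // NUM_VERTEX_BUFFERS-style sentinel
    std::uint32_t max_index = 0;
    for (std::uint32_t slot : {2u, 5u}) {
        min_index = std::min(min_index, slot);
        max_index = std::max(max_index, slot);
    }
    ++max_index; // one past the last dirty slot
    assert(min_index == 2 && max_index == 6);
    assert(max_index - min_index == 4); // binding count derived by the runtime
}
```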

View file

@@ -518,15 +518,15 @@ void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bi
if (device.IsExtExtendedDynamicStateSupported()) {
scheduler.Record([bindings = std::move(bindings),
buffer_handles = std::move(buffer_handles)](vk::CommandBuffer cmdbuf) {
cmdbuf.BindVertexBuffers2EXT(bindings.min_index, bindings.count, buffer_handles.data(),
bindings.offsets.data(), bindings.sizes.data(),
bindings.strides.data());
cmdbuf.BindVertexBuffers2EXT(
bindings.min_index, bindings.max_index - bindings.min_index, buffer_handles.data(),
bindings.offsets.data(), bindings.sizes.data(), bindings.strides.data());
});
} else {
scheduler.Record([bindings = std::move(bindings),
buffer_handles = std::move(buffer_handles)](vk::CommandBuffer cmdbuf) {
cmdbuf.BindVertexBuffers(bindings.min_index, bindings.count, buffer_handles.data(),
bindings.offsets.data());
cmdbuf.BindVertexBuffers(bindings.min_index, bindings.max_index - bindings.min_index,
buffer_handles.data(), bindings.offsets.data());
});
}
}
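The Vulkan backend now derives the binding count from the index range instead of a stored count, relying on the handle/offset/size/stride arrays holding exactly max_index - min_index entries, which the cache guarantees by filling the contiguous range. A rough illustration of that invariant against a raw vkCmdBindVertexBuffers2EXT call (assumes VK_EXT_extended_dynamic_state is enabled and the function pointer has been loaded; this is not yuzu's command-buffer wrapper):

```cpp
// Bind a contiguous slot range: firstBinding = min_index, bindingCount derived from
// the index range, with one array entry per slot in that range.
#include <cstdint>
#include <vector>
#include <vulkan/vulkan.h>

void BindRange(VkCommandBuffer cmdbuf, PFN_vkCmdBindVertexBuffers2EXT bind_fn,
               std::uint32_t min_index, std::uint32_t max_index,
               const std::vector<VkBuffer>& buffers, const std::vector<VkDeviceSize>& offsets,
               const std::vector<VkDeviceSize>& sizes, const std::vector<VkDeviceSize>& strides) {
    const std::uint32_t count = max_index - min_index;
    if (buffers.size() != count) {
        return; // each array must carry one entry per bound slot
    }
    bind_fn(cmdbuf, min_index, count, buffers.data(), offsets.data(), sizes.data(),
            strides.data());
}
```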
@@ -563,8 +563,9 @@ void BufferCacheRuntime::BindTransformFeedbackBuffers(VideoCommon::HostBindings<
}
scheduler.Record([bindings = std::move(bindings),
buffer_handles = std::move(buffer_handles)](vk::CommandBuffer cmdbuf) {
cmdbuf.BindTransformFeedbackBuffersEXT(0, bindings.count, buffer_handles.data(),
bindings.offsets.data(), bindings.sizes.data());
cmdbuf.BindTransformFeedbackBuffersEXT(0, bindings.max_index - bindings.min_index,
buffer_handles.data(), bindings.offsets.data(),
bindings.sizes.data());
});
}