// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <thread>
#include <variant>

#include "common/threadsafe_queue.h"
#include "video_core/gpu.h"

namespace Tegra {
struct FramebufferConfig;
class DmaPusher;
} // namespace Tegra

namespace Core {
class System;
namespace Timing {
struct EventType;
} // namespace Timing
} // namespace Core

namespace VideoCommon::GPUThread {
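
// These types implement the asynchronous GPU model: the emulated CPU thread produces commands and
// pushes them into a single-producer/single-consumer queue, while a dedicated GPU thread consumes
// and executes them; SynchState below carries the shared state used to coordinate the two.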

/// Command to signal to the GPU thread that processing has ended
struct EndProcessingCommand final {};

/// Command to signal to the GPU thread that a command list is ready for processing
struct SubmitListCommand final {
    explicit SubmitListCommand(Tegra::CommandList&& entries) : entries{std::move(entries)} {}

    Tegra::CommandList entries;
};

/// Command to signal to the GPU thread that a swap buffers is pending
struct SwapBuffersCommand final {
    explicit SwapBuffersCommand(std::optional<const Tegra::FramebufferConfig> framebuffer)
        : framebuffer{std::move(framebuffer)} {}

    std::optional<Tegra::FramebufferConfig> framebuffer;
};

/// Command to signal to the GPU thread to flush a region
struct FlushRegionCommand final {
    explicit constexpr FlushRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {}

    CacheAddr addr;
    u64 size;
};

/// Command to signal to the GPU thread to invalidate a region
struct InvalidateRegionCommand final {
    explicit constexpr InvalidateRegionCommand(CacheAddr addr, u64 size)
        : addr{addr}, size{size} {}

    CacheAddr addr;
    u64 size;
};

/// Command to signal to the GPU thread to flush and invalidate a region
struct FlushAndInvalidateRegionCommand final {
    explicit constexpr FlushAndInvalidateRegionCommand(CacheAddr addr, u64 size)
        : addr{addr}, size{size} {}

    CacheAddr addr;
    u64 size;
};

using CommandData =
    std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand,
                 InvalidateRegionCommand, FlushAndInvalidateRegionCommand>;
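
// Note: how these alternatives are dispatched is up to the GPU thread's run loop in the
// accompanying implementation file (e.g. std::visit or a chain of std::get_if checks);
// this header only defines the message types.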

struct CommandDataContainer {
    CommandDataContainer() = default;

    CommandDataContainer(CommandData&& data, u64 next_fence)
        : data{std::move(data)}, fence{next_fence} {}

    CommandDataContainer& operator=(const CommandDataContainer& t) {
        data = t.data;
        fence = t.fence;
        return *this;
    }

    CommandData data;
    u64 fence{};
};

/// Struct used to synchronize the GPU thread
struct SynchState final {
    std::atomic_bool is_running{true};
    std::atomic_int queued_frame_count{};
    std::mutex synchronization_mutex;
    std::mutex commands_mutex;
    std::condition_variable commands_condition;
    std::condition_variable synchronization_condition;

    /// Returns true if the gap in GPU commands is small enough that we can consider the CPU and GPU
    /// synchronized. This is entirely empirical.
    bool IsSynchronized() const {
        constexpr std::size_t max_queue_gap{5};
        return queue.Size() <= max_queue_gap;
    }

    void TrySynchronize() {
        if (IsSynchronized()) {
            std::lock_guard<std::mutex> lock{synchronization_mutex};
            synchronization_condition.notify_one();
        }
    }

    void WaitForSynchronization(u64 fence);

    void SignalCommands() {
        if (queue.Empty()) {
            return;
        }

        commands_condition.notify_one();
    }

    void WaitForCommands() {
        std::unique_lock lock{commands_mutex};
        commands_condition.wait(lock, [this] { return !queue.Empty(); });
    }

    using CommandQueue = Common::SPSCQueue<CommandDataContainer>;
    CommandQueue queue;
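
    // last_fence appears to track the fence value of the most recently queued command on the CPU
    // side, while signaled_fence is presumably advanced by the GPU thread as commands complete;
    // the updates themselves live in the implementation file rather than in this header.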
    u64 last_fence{};
    std::atomic<u64> signaled_fence{};
};
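
// Expected interplay between the two threads, as suggested by this header: the CPU-side producer
// pushes a CommandDataContainer into SynchState::queue and calls SignalCommands(); the GPU-side
// consumer blocks in WaitForCommands(), pops and executes the command, and TrySynchronize() then
// wakes any CPU thread blocked in WaitForSynchronization() once the backlog is small enough. The
// actual run loop lives in the implementation file, not here.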

/// Class used to manage the GPU thread
class ThreadManager final {
public:
    explicit ThreadManager(Core::System& system);
    ~ThreadManager();

    /// Creates and starts the GPU thread.
    void StartThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher);

    /// Push GPU command entries to be processed
    void SubmitList(Tegra::CommandList&& entries);

    /// Swap buffers (render frame)
    void SwapBuffers(
        std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    void FlushRegion(CacheAddr addr, u64 size);

    /// Notify rasterizer that any caches of the specified region should be invalidated
    void InvalidateRegion(CacheAddr addr, u64 size);

    /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
    void FlushAndInvalidateRegion(CacheAddr addr, u64 size);

private:
    /// Pushes a command to be executed by the GPU thread
    u64 PushCommand(CommandData&& command_data);

private:
    SynchState state;
    Core::System& system;
    Core::Timing::EventType* synchronization_event{};
    std::thread thread;
    std::thread::id thread_id;
};
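
// Illustrative call sequence for a hypothetical caller (all names below are placeholders, not
// part of this header):
//
//     VideoCommon::GPUThread::ThreadManager gpu_thread{system};
//     gpu_thread.StartThread(renderer, dma_pusher); // create and start the GPU thread
//     gpu_thread.SubmitList(std::move(entries));    // queue a command list for processing
//     gpu_thread.SwapBuffers(framebuffer);          // request that a frame be presented
//     gpu_thread.FlushRegion(addr, size);           // ask the rasterizer to flush a cache region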

} // namespace VideoCommon::GPUThread