// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_pump_win.h"

#include <math.h>
#include <stdint.h>

#include <limits>

#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/win/current_module.h"
#include "base/win/wrapped_window_proc.h"

namespace base {

namespace {

enum MessageLoopProblems {
  MESSAGE_POST_ERROR,
  COMPLETION_POST_ERROR,
  SET_TIMER_ERROR,
  RECEIVED_WM_QUIT_ERROR,
  MESSAGE_LOOP_PROBLEM_MAX,
};

}  // namespace

// Message sent to get an additional time slice for pumping (processing)
// another task (a series of such messages creates a continuous task pump).
static const int kMsgHaveWork = WM_USER + 1;

//-----------------------------------------------------------------------------
// MessagePumpWin public:

MessagePumpWin::MessagePumpWin() = default;

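// Runs the pump until Quit() is called. Each invocation pushes a fresh
// RunState, so nested Run() calls keep their own quit flag and run depth.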
void MessagePumpWin::Run(Delegate* delegate) {
  RunState s;
  s.delegate = delegate;
  s.should_quit = false;
  s.run_depth = state_ ? state_->run_depth + 1 : 1;

  // TODO(stanisc): crbug.com/596190: Remove this code once the bug is fixed.
  s.schedule_work_error_count = 0;
  s.last_schedule_work_error_time = Time();

  RunState* previous_state = state_;
  state_ = &s;

  DoRunLoop();

  state_ = previous_state;
}

void MessagePumpWin::Quit() {
  DCHECK(state_);
  state_->should_quit = true;
}

//-----------------------------------------------------------------------------
// MessagePumpWin protected:

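// Returns the delay until the next scheduled delayed task, in milliseconds
// rounded up; 0 if that work is already due, or -1 if no delayed work is
// pending.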
int MessagePumpWin::GetCurrentDelay() const {
  if (delayed_work_time_.is_null())
    return -1;

  // Be careful here. TimeDelta has a precision of microseconds, but we want a
  // value in milliseconds. If there are 5.5ms left, should the delay be 5 or
  // 6? It should be 6 to avoid executing delayed work too early.
  double timeout =
      ceil((delayed_work_time_ - TimeTicks::Now()).InMillisecondsF());

  // Range check the |timeout| while converting to an integer. If the |timeout|
  // is negative, then we need to run delayed work soon. If the |timeout| is
  // "overflowingly" large, that means a delayed task was posted with a
  // super-long delay.
  return timeout < 0 ? 0 :
      (timeout > std::numeric_limits<int>::max() ?
       std::numeric_limits<int>::max() : static_cast<int>(timeout));
}

//-----------------------------------------------------------------------------
// MessagePumpForUI public:

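// Creates the hidden message window that receives the kMsgHaveWork and
// WM_TIMER wake-ups for this pump.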
MessagePumpForUI::MessagePumpForUI() {
  bool succeeded = message_window_.Create(
      BindRepeating(&MessagePumpForUI::MessageCallback, Unretained(this)));
  DCHECK(succeeded);
}

MessagePumpForUI::~MessagePumpForUI() = default;

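// May be called from any thread. The interlocked exchange on |work_state_|
// guarantees that at most one kMsgHaveWork message is in flight at a time.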
void MessagePumpForUI::ScheduleWork() {
  if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
    return;  // Someone else continued the pumping.

  // Make sure the MessagePump does some work for us.
  BOOL ret = PostMessage(message_window_.hwnd(), kMsgHaveWork, 0, 0);
  if (ret)
    return;  // There was room in the Window Message queue.

  // We have failed to insert a have-work message, so there is a chance that we
  // will starve tasks/timers while sitting in a nested run loop. Nested
  // loops only look at Windows Message queues, and don't look at *our* task
  // queues, etc., so we might not get a time slice in such. :-(
  // We could abort here, but the fear is that this failure mode is plausibly
  // common (the message queue is full, at about 2000 messages), so we'll do a
  // near-graceful recovery. Nested loops are pretty transient (we think), so
  // this will probably be recoverable.

  // Clarify that we didn't really insert.
  InterlockedExchange(&work_state_, READY);
  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
                            MESSAGE_LOOP_PROBLEM_MAX);
  state_->schedule_work_error_count++;
  state_->last_schedule_work_error_time = Time::Now();
}

void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
  delayed_work_time_ = delayed_work_time;
  RescheduleTimer();
}

//-----------------------------------------------------------------------------
// MessagePumpForUI private:

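// Receives every message delivered to the pump's message window. Only
// kMsgHaveWork and WM_TIMER are handled here; returning false lets the
// default window procedure deal with everything else.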
bool MessagePumpForUI::MessageCallback(
    UINT message, WPARAM wparam, LPARAM lparam, LRESULT* result) {
  switch (message) {
    case kMsgHaveWork:
      HandleWorkMessage();
      break;
    case WM_TIMER:
      HandleTimerMessage();
      break;
  }
  return false;
}

void MessagePumpForUI::DoRunLoop() {
  // If this was just a simple PeekMessage() loop (servicing all possible work
  // queues), then Windows would try to achieve the following order (according
  // to MSDN documentation about PeekMessage with no filter):
  //    * Sent messages
  //    * Posted messages
  //    * Sent messages (again)
  //    * WM_PAINT messages
  //    * WM_TIMER messages
  //
  // Summary: none of the above classes is starved, and sent messages have
  // twice the chance of being processed (i.e., reduced service time).

  for (;;) {
    // If we do any work, we may create more messages etc., and more work may
    // possibly be waiting in another task group. When we (for example)
    // ProcessNextWindowsMessage(), there is a good chance there are still more
    // messages waiting. On the other hand, when any of these methods return
    // having done no work, then it is pretty unlikely that calling them again
    // quickly will find any work to do. Finally, if they all say they had no
    // work, then it is a good time to consider sleeping (waiting) for more
    // work.

    bool more_work_is_plausible = ProcessNextWindowsMessage();
    if (state_->should_quit)
      break;

    more_work_is_plausible |= state_->delegate->DoWork();
    if (state_->should_quit)
      break;

    more_work_is_plausible |=
        state_->delegate->DoDelayedWork(&delayed_work_time_);
    // If we did not process any delayed work, then we can assume that our
    // existing WM_TIMER, if any, will fire when delayed work should run. We
    // don't want to disturb that timer if it is already in flight. However,
    // if we did do all remaining delayed work, then let's kill the WM_TIMER.
    if (more_work_is_plausible && delayed_work_time_.is_null())
      KillTimer(message_window_.hwnd(), reinterpret_cast<UINT_PTR>(this));
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    WaitForWork();  // Wait (sleep) until we have work to do again.
  }
}

void MessagePumpForUI::WaitForWork() {
  // Wait until a message is available, up to the time needed by the timer
  // manager to fire the next set of timers.
  int delay;
  DWORD wait_flags = MWMO_INPUTAVAILABLE;

  while ((delay = GetCurrentDelay()) != 0) {
    if (delay < 0)  // Negative value means no timers waiting.
      delay = INFINITE;

    DWORD result = MsgWaitForMultipleObjectsEx(0, nullptr, delay, QS_ALLINPUT,
                                               wait_flags);

    if (WAIT_OBJECT_0 == result) {
      // A WM_* message is available.
      // If a parent-child relationship exists between windows across threads
      // then their thread inputs are implicitly attached.
      // This causes the MsgWaitForMultipleObjectsEx API to return indicating
      // that messages are ready for processing (specifically, mouse messages
      // intended for the child window may appear if the child window has
      // capture).
      // The subsequent PeekMessage call may fail to return any messages, thus
      // causing us to enter a tight loop at times.
      // The code below is a workaround to give the child window
      // some time to process its input messages by looping back to
      // MsgWaitForMultipleObjectsEx above when there are no messages for the
      // current thread.
      MSG msg = {0};
      bool has_pending_sent_message =
          (HIWORD(GetQueueStatus(QS_SENDMESSAGE)) & QS_SENDMESSAGE) != 0;
      if (has_pending_sent_message ||
          PeekMessage(&msg, nullptr, 0, 0, PM_NOREMOVE)) {
        return;
      }

      // We know there are no more messages for this thread because PeekMessage
      // has returned false. Reset |wait_flags| so that we wait for a *new*
      // message.
      wait_flags = 0;
    }

    DCHECK_NE(WAIT_FAILED, result) << GetLastError();
  }
}

void MessagePumpForUI::HandleWorkMessage() {
  // If we are being called outside of the context of Run, then don't try to do
  // any work. This could correspond to a MessageBox call or something of that
  // sort.
  if (!state_) {
    // Since we handled a kMsgHaveWork message, we must still update this flag.
    InterlockedExchange(&work_state_, READY);
    return;
  }

  // Let whatever would have run had we not been putting messages in the queue
  // run now. This is an attempt to make our dummy message not starve other
  // messages that may be in the Windows message queue.
  ProcessPumpReplacementMessage();

  // Now give the delegate a chance to do some work. It'll let us know if it
  // needs to do more work.
  if (state_->delegate->DoWork())
    ScheduleWork();
  state_->delegate->DoDelayedWork(&delayed_work_time_);
  RescheduleTimer();
}

void MessagePumpForUI::HandleTimerMessage() {
  KillTimer(message_window_.hwnd(), reinterpret_cast<UINT_PTR>(this));

  // If we are being called outside of the context of Run, then don't do
  // anything. This could correspond to a MessageBox call or something of
  // that sort.
  if (!state_)
    return;

  state_->delegate->DoDelayedWork(&delayed_work_time_);
  RescheduleTimer();
}

void MessagePumpForUI::RescheduleTimer() {
  if (delayed_work_time_.is_null())
    return;
  //
  // We would *like* to provide high resolution timers. Windows timers using
  // SetTimer() have a 10ms granularity. We have to use WM_TIMER as a wakeup
  // mechanism because the application can enter modal windows loops where it
  // is not running our MessageLoop; the only way to have our timers fire in
  // these cases is to post messages there.
  //
  // To provide sub-10ms timers, we process timers directly from our run loop.
  // For the common case, timers will be processed there as the run loop does
  // its normal work. However, we *also* set the system timer so that WM_TIMER
  // events fire. This mops up the case of timers not being able to work in
  // modal message loops. It is possible for the SetTimer to pop and have no
  // pending timers, because they could have already been processed by the
  // run loop itself.
  //
  // We use a single SetTimer corresponding to the timer that will expire
  // soonest. As new timers are created and destroyed, we update SetTimer.
  // Getting a spurious SetTimer event firing is benign, as we'll just be
  // processing an empty timer queue.
  //
  int delay_msec = GetCurrentDelay();
  DCHECK_GE(delay_msec, 0);
  if (delay_msec == 0) {
    ScheduleWork();
  } else {
    if (delay_msec < USER_TIMER_MINIMUM)
      delay_msec = USER_TIMER_MINIMUM;

    // Create a WM_TIMER event that will wake us up to check for any pending
    // timers (in case we are running within a nested, external sub-pump).
    BOOL ret = SetTimer(message_window_.hwnd(), 0, delay_msec, nullptr);
    if (ret)
      return;
    // If we can't set timers, we are in big trouble... but cross our fingers
    // for now.
    // TODO(jar): If we don't see this error, use a CHECK() here instead.
    UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", SET_TIMER_ERROR,
                              MESSAGE_LOOP_PROBLEM_MAX);
  }
}

bool MessagePumpForUI::ProcessNextWindowsMessage() {
  // If there are sent messages in the queue then PeekMessage internally
  // dispatches the message and returns false. We return true in this
  // case to ensure that the message loop peeks again instead of calling
  // MsgWaitForMultipleObjectsEx again.
  bool sent_messages_in_queue = false;
  DWORD queue_status = GetQueueStatus(QS_SENDMESSAGE);
  if (HIWORD(queue_status) & QS_SENDMESSAGE)
    sent_messages_in_queue = true;

  MSG msg;
  if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE)
    return ProcessMessageHelper(msg);

  return sent_messages_in_queue;
}

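// Translates and dispatches |msg|. WM_QUIT is treated as unexpected, recorded
// via UMA and reposted for the outermost loop; our own kMsgHaveWork marker is
// diverted to ProcessPumpReplacementMessage(). Returns true if a message was
// dispatched.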
bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
  TRACE_EVENT1("base", "MessagePumpForUI::ProcessMessageHelper",
               "message", msg.message);
  if (WM_QUIT == msg.message) {
    // Receiving WM_QUIT is unusual and unexpected on most message loops.
    UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem",
                              RECEIVED_WM_QUIT_ERROR, MESSAGE_LOOP_PROBLEM_MAX);
    // Repost the QUIT message so that it will be retrieved by the primary
    // GetMessage() loop.
    state_->should_quit = true;
    PostQuitMessage(static_cast<int>(msg.wParam));
    return false;
  }

  // While running our main message pump, we discard kMsgHaveWork messages.
  if (msg.message == kMsgHaveWork && msg.hwnd == message_window_.hwnd())
    return ProcessPumpReplacementMessage();

  TranslateMessage(&msg);
  DispatchMessage(&msg);

  return true;
}

bool MessagePumpForUI::ProcessPumpReplacementMessage() {
  // When we encounter a kMsgHaveWork message, this method is called to peek
  // and process a replacement message. The goal is to make the kMsgHaveWork
  // as non-intrusive as possible, even though a continuous stream of such
  // messages is posted. This method carefully peeks a message while there is
  // no chance for a kMsgHaveWork to be pending, then resets the |work_state_|
  // flag (allowing a replacement kMsgHaveWork to possibly be posted), and
  // finally dispatches that peeked replacement. Note that the re-post of
  // kMsgHaveWork may be asynchronous to this thread!!

  MSG msg;
  const bool have_message =
      PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE;

  // Expect no message or a message different than kMsgHaveWork.
  DCHECK(!have_message || kMsgHaveWork != msg.message ||
         msg.hwnd != message_window_.hwnd());

  // Since we discarded a kMsgHaveWork message, we must update the flag.
  int old_work_state_ = InterlockedExchange(&work_state_, READY);
  DCHECK_EQ(HAVE_WORK, old_work_state_);

  // We don't need a special time slice if there was no message to process.
  if (!have_message)
    return false;

  // Guarantee we'll get another time slice in the case where we go into native
  // windows code. This ScheduleWork() may hurt performance a tiny bit when
  // tasks appear very infrequently, but when the event queue is busy, the
  // kMsgHaveWork events get (percentage wise) rarer and rarer.
  ScheduleWork();
  return ProcessMessageHelper(msg);
}

//-----------------------------------------------------------------------------
// MessagePumpForIO public:

MessagePumpForIO::IOContext::IOContext() {
  memset(&overlapped, 0, sizeof(overlapped));
}

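// Creates the completion port used both for registered handles and for
// ScheduleWork() wake-up packets; concurrency is limited to a single thread.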
MessagePumpForIO::MessagePumpForIO() {
  port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, nullptr,
                                   reinterpret_cast<ULONG_PTR>(nullptr), 1));
  DCHECK(port_.IsValid());
}

MessagePumpForIO::~MessagePumpForIO() = default;

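// IO-pump analogue of MessagePumpForUI::ScheduleWork(): wakes DoRunLoop() by
// posting a completion packet whose key and OVERLAPPED pointer are both
// |this|, which ProcessInternalIOItem() later recognizes.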
void MessagePumpForIO::ScheduleWork() {
  if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
    return;  // Someone else continued the pumping.

  // Make sure the MessagePump does some work for us.
  BOOL ret = PostQueuedCompletionStatus(port_.Get(), 0,
                                        reinterpret_cast<ULONG_PTR>(this),
                                        reinterpret_cast<OVERLAPPED*>(this));
  if (ret)
    return;  // Post worked perfectly.

  // See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
  InterlockedExchange(&work_state_, READY);  // Clarify that we didn't succeed.
  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
                            MESSAGE_LOOP_PROBLEM_MAX);
  state_->schedule_work_error_count++;
  state_->last_schedule_work_error_time = Time::Now();
}

void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
  // We know that we can't be blocked right now since this method can only be
  // called on the same thread as Run, so we only need to update our record of
  // how long to sleep when we do sleep.
  delayed_work_time_ = delayed_work_time;
}

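// Associates |file_handle| with this pump's completion port. Completions of
// overlapped I/O issued on the handle are then delivered to |handler| via
// IOHandler::OnIOCompleted() on the pump's thread.
//
// Illustrative sketch only (the IOHandler interface is declared in
// message_pump_win.h; Reader and |file| here are hypothetical):
//
//   class Reader : public MessagePumpForIO::IOHandler {
//    public:
//     void OnIOCompleted(IOContext* context, DWORD bytes_transfered,
//                        DWORD error) override {
//       // Consume the completed read here.
//     }
//   };
//
//   Reader reader;
//   pump->RegisterIOHandler(file, &reader);
//   // Overlapped ReadFile()/WriteFile() calls on |file| now complete through
//   // Reader::OnIOCompleted() when the pump processes IO.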
void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
                                         IOHandler* handler) {
  HANDLE port = CreateIoCompletionPort(file_handle, port_.Get(),
                                       reinterpret_cast<ULONG_PTR>(handler), 1);
  DPCHECK(port);
}

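// Routes completion notifications for |job_handle| to |handler| through the
// same port. Returns false if the job object association fails.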
bool MessagePumpForIO::RegisterJobObject(HANDLE job_handle,
                                         IOHandler* handler) {
  JOBOBJECT_ASSOCIATE_COMPLETION_PORT info;
  info.CompletionKey = handler;
  info.CompletionPort = port_.Get();
  return SetInformationJobObject(job_handle,
                                 JobObjectAssociateCompletionPortInformation,
                                 &info,
                                 sizeof(info)) != FALSE;
}

//-----------------------------------------------------------------------------
// MessagePumpForIO private:

void MessagePumpForIO::DoRunLoop() {
  for (;;) {
    // If we do any work, we may create more messages etc., and more work may
    // possibly be waiting in another task group. When we (for example)
    // WaitForIOCompletion(), there is a good chance there are still more
    // messages waiting. On the other hand, when any of these methods return
    // having done no work, then it is pretty unlikely that calling them
    // again quickly will find any work to do. Finally, if they all say they
    // had no work, then it is a good time to consider sleeping (waiting) for
    // more work.

    bool more_work_is_plausible = state_->delegate->DoWork();
    if (state_->should_quit)
      break;

    more_work_is_plausible |= WaitForIOCompletion(0, nullptr);
    if (state_->should_quit)
      break;

    more_work_is_plausible |=
        state_->delegate->DoDelayedWork(&delayed_work_time_);
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    WaitForWork();  // Wait (sleep) until we have work to do again.
  }
}

// Wait until IO completes, up to the time needed by the timer manager to fire
// the next set of timers.
void MessagePumpForIO::WaitForWork() {
  // We do not support nested IO message loops. This is to avoid messy
  // recursion problems.
  DCHECK_EQ(1, state_->run_depth) << "Cannot nest an IO message loop!";

  int timeout = GetCurrentDelay();
  if (timeout < 0)  // Negative value means no timers waiting.
    timeout = INFINITE;

  WaitForIOCompletion(timeout, nullptr);
}

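// Handles at most one completion: either an item previously saved for
// |filter| or a fresh one dequeued from the port (waiting up to |timeout|).
// Returns false only when nothing completed in time.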
bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  IOItem item;
  if (completed_io_.empty() || !MatchCompletedIOItem(filter, &item)) {
    // We have to ask the system for another IO completion.
    if (!GetIOItem(timeout, &item))
      return false;

    if (ProcessInternalIOItem(item))
      return true;
  }

  if (filter && item.handler != filter) {
    // Save this item for later.
    completed_io_.push_back(item);
  } else {
    item.handler->OnIOCompleted(item.context, item.bytes_transfered,
                                item.error);
  }
  return true;
}

// Asks the OS for another IO completion result.
bool MessagePumpForIO::GetIOItem(DWORD timeout, IOItem* item) {
  memset(item, 0, sizeof(*item));
  ULONG_PTR key = reinterpret_cast<ULONG_PTR>(nullptr);
  OVERLAPPED* overlapped = nullptr;
  if (!GetQueuedCompletionStatus(port_.Get(), &item->bytes_transfered, &key,
                                 &overlapped, timeout)) {
    if (!overlapped)
      return false;  // Nothing in the queue.
    item->error = GetLastError();
    item->bytes_transfered = 0;
  }

  item->handler = reinterpret_cast<IOHandler*>(key);
  item->context = reinterpret_cast<IOContext*>(overlapped);
  return true;
}

bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
  if (reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.context) &&
      reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.handler)) {
    // This is our internal completion.
    DCHECK(!item.bytes_transfered);
    InterlockedExchange(&work_state_, READY);
    return true;
  }
  return false;
}

// Returns a completion item that was previously received.
bool MessagePumpForIO::MatchCompletedIOItem(IOHandler* filter, IOItem* item) {
  DCHECK(!completed_io_.empty());
  for (std::list<IOItem>::iterator it = completed_io_.begin();
       it != completed_io_.end(); ++it) {
    if (!filter || it->handler == filter) {
      *item = *it;
      completed_io_.erase(it);
      return true;
    }
  }
  return false;
}

}  // namespace base