[DEV] add v1.66.0

2018-01-12 21:47:58 +01:00
parent 87059bb1af
commit a97e9ae7d4
49032 changed files with 7668950 additions and 0 deletions


@@ -0,0 +1,50 @@
#
# Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
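# Boost.Build rules for the executor examples. The lib declarations below name
# the platform-specific libraries that the per-OS requirements in the project
# block link against.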
import os ;
if [ os.name ] = SOLARIS
{
lib socket ;
lib nsl ;
}
else if [ os.name ] = NT
{
lib ws2_32 ;
lib mswsock ;
}
else if [ os.name ] = HPUX
{
lib ipv6 ;
}
else if [ os.name ] = HAIKU
{
lib network ;
}
project
: requirements
<library>/boost/system//boost_system
<define>BOOST_ALL_NO_LIB=1
<threading>multi
<os>SOLARIS:<library>socket
<os>SOLARIS:<library>nsl
<os>NT:<define>_WIN32_WINNT=0x0501
<os>NT,<toolset>gcc:<library>ws2_32
<os>NT,<toolset>gcc:<library>mswsock
<os>NT,<toolset>gcc-cygwin:<define>__USE_W32_SOCKETS
<os>HPUX,<toolset>gcc:<define>_XOPEN_SOURCE_EXTENDED
<os>HPUX:<library>ipv6
<os>HAIKU:<library>network
;
exe actor : actor.cpp ;
exe bank_account_1 : bank_account_1.cpp ;
exe bank_account_2 : bank_account_2.cpp ;
exe fork_join : fork_join.cpp ;
exe pipeline : pipeline.cpp ;
exe priority_scheduler : priority_scheduler.cpp ;
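# Usage sketch (assuming Boost.Build is installed): running `b2` from this
# directory builds each of the exe targets declared above.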


@@ -0,0 +1,286 @@
#include <boost/asio/defer.hpp>
#include <boost/asio/executor.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/system_executor.hpp>
#include <condition_variable>
#include <deque>
#include <memory>
#include <mutex>
#include <typeinfo>
#include <utility>
#include <vector>
using boost::asio::defer;
using boost::asio::executor;
using boost::asio::post;
using boost::asio::strand;
using boost::asio::system_executor;
//------------------------------------------------------------------------------
// A tiny actor framework
// ~~~~~~~~~~~~~~~~~~~~~~
class actor;
// Used to identify the sender and recipient of messages.
typedef actor* actor_address;
// Base class for all registered message handlers.
class message_handler_base
{
public:
virtual ~message_handler_base() {}
// Used to determine which message handlers receive an incoming message.
virtual const std::type_info& message_id() const = 0;
};
// Base class for a handler for a specific message type.
template <class Message>
class message_handler : public message_handler_base
{
public:
// Handle an incoming message.
virtual void handle_message(Message msg, actor_address from) = 0;
};
// Concrete message handler for a specific message type.
template <class Actor, class Message>
class mf_message_handler : public message_handler<Message>
{
public:
// Construct a message handler to invoke the specified member function.
mf_message_handler(void (Actor::* mf)(Message, actor_address), Actor* a)
: function_(mf), actor_(a)
{
}
// Used to determine which message handlers receive an incoming message.
virtual const std::type_info& message_id() const
{
return typeid(Message);
}
// Handle an incoming message.
virtual void handle_message(Message msg, actor_address from)
{
(actor_->*function_)(std::move(msg), from);
}
// Determine whether the message handler represents the specified function.
bool is_function(void (Actor::* mf)(Message, actor_address)) const
{
return mf == function_;
}
private:
void (Actor::* function_)(Message, actor_address);
Actor* actor_;
};
// Base class for all actors.
class actor
{
public:
virtual ~actor()
{
}
// Obtain the actor's address for use as a message sender or recipient.
actor_address address()
{
return this;
}
// Send a message from one actor to another.
template <class Message>
friend void send(Message msg, actor_address from, actor_address to)
{
// Execute the message handler in the context of the target's executor.
post(to->executor_,
[=]
{
to->call_handler(std::move(msg), from);
});
}
protected:
// Construct the actor to use the specified executor for all message handlers.
actor(executor e)
: executor_(std::move(e))
{
}
// Register a handler for a specific message type. Duplicates are permitted.
template <class Actor, class Message>
void register_handler(void (Actor::* mf)(Message, actor_address))
{
handlers_.push_back(
std::make_shared<mf_message_handler<Actor, Message>>(
mf, static_cast<Actor*>(this)));
}
// Deregister a handler. Removes only the first matching handler.
template <class Actor, class Message>
void deregister_handler(void (Actor::* mf)(Message, actor_address))
{
const std::type_info& id = typeid(Message);
for (auto iter = handlers_.begin(); iter != handlers_.end(); ++iter)
{
if ((*iter)->message_id() == id)
{
auto mh = static_cast<mf_message_handler<Actor, Message>*>(iter->get());
if (mh->is_function(mf))
{
handlers_.erase(iter);
return;
}
}
}
}
// Send a message from within a message handler.
template <class Message>
void tail_send(Message msg, actor_address to)
{
// Execute the message handler in the context of the target's executor.
actor* from = this;
defer(to->executor_,
[=]
{
to->call_handler(std::move(msg), from);
});
}
private:
// Find the matching message handlers, if any, and call them.
template <class Message>
void call_handler(Message msg, actor_address from)
{
const std::type_info& message_id = typeid(Message);
for (auto& h: handlers_)
{
if (h->message_id() == message_id)
{
auto mh = static_cast<message_handler<Message>*>(h.get());
mh->handle_message(msg, from);
}
}
}
// All messages associated with a single actor object should be processed
// non-concurrently. We use a strand to ensure non-concurrent execution even
// if the underlying executor may use multiple threads.
strand<executor> executor_;
std::vector<std::shared_ptr<message_handler_base>> handlers_;
};
// A concrete actor that allows synchronous message retrieval.
template <class Message>
class receiver : public actor
{
public:
receiver()
: actor(system_executor())
{
register_handler(&receiver::message_handler);
}
// Block until a message has been received.
Message wait()
{
std::unique_lock<std::mutex> lock(mutex_);
condition_.wait(lock, [this]{ return !message_queue_.empty(); });
Message msg(std::move(message_queue_.front()));
message_queue_.pop_front();
return msg;
}
private:
// Handle a new message by adding it to the queue and waking a waiter.
void message_handler(Message msg, actor_address /* from */)
{
std::lock_guard<std::mutex> lock(mutex_);
message_queue_.push_back(std::move(msg));
condition_.notify_one();
}
std::mutex mutex_;
std::condition_variable condition_;
std::deque<Message> message_queue_;
};
//------------------------------------------------------------------------------
#include <boost/asio/thread_pool.hpp>
#include <iostream>
using boost::asio::thread_pool;
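// A ring member: it first receives the address of the next actor in the ring,
// then forwards decremented tokens around the ring until a token reaches
// zero, at which point the token is sent back to the original caller.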
class member : public actor
{
public:
explicit member(executor e)
: actor(std::move(e))
{
register_handler(&member::init_handler);
}
private:
void init_handler(actor_address next, actor_address from)
{
next_ = next;
caller_ = from;
register_handler(&member::token_handler);
deregister_handler(&member::init_handler);
}
void token_handler(int token, actor_address /*from*/)
{
int msg(token);
actor_address to(caller_);
if (token > 0)
{
msg = token - 1;
to = next_;
}
tail_send(msg, to);
}
actor_address next_;
actor_address caller_;
};
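// The main program builds a ring of 503 member actors spread across 16
// single-threaded pools, sends each actor one token, and then waits until
// every token has been passed around the ring and counted down to zero.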
int main()
{
const std::size_t num_threads = 16;
const int num_hops = 50000000;
const std::size_t num_actors = 503;
const int token_value = (num_hops + num_actors - 1) / num_actors;
const std::size_t actors_per_thread = num_actors / num_threads;
struct single_thread_pool : thread_pool { single_thread_pool() : thread_pool(1) {} };
single_thread_pool pools[num_threads];
std::vector<std::shared_ptr<member>> members(num_actors);
receiver<int> rcvr;
// Create the member actors.
for (std::size_t i = 0; i < num_actors; ++i)
members[i] = std::make_shared<member>(pools[(i / actors_per_thread) % num_threads].get_executor());
// Initialise the actors by passing each one the address of the next actor in the ring.
for (std::size_t i = num_actors, next_i = 0; i > 0; next_i = --i)
send(members[next_i]->address(), rcvr.address(), members[i - 1]->address());
// Send exactly one token to each actor, all with the same initial value, rounding up if required.
for (std::size_t i = 0; i < num_actors; ++i)
send(token_value, rcvr.address(), members[i]->address());
// Wait for all signal messages, indicating the tokens have all reached zero.
for (std::size_t i = 0; i < num_actors; ++i)
rcvr.wait();
}


@@ -0,0 +1,54 @@
#include <boost/asio/post.hpp>
#include <boost/asio/thread_pool.hpp>
#include <iostream>
using boost::asio::post;
using boost::asio::thread_pool;
// Traditional active object pattern.
// Member functions do not block.
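// All accesses to balance_ are posted to a single-threaded pool, so they run
// strictly one at a time and in FIFO order without any explicit locking.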
class bank_account
{
int balance_ = 0;
mutable thread_pool pool_{1};
public:
void deposit(int amount)
{
post(pool_, [=]
{
balance_ += amount;
});
}
void withdraw(int amount)
{
post(pool_, [=]
{
if (balance_ >= amount)
balance_ -= amount;
});
}
void print_balance() const
{
post(pool_, [=]
{
std::cout << "balance = " << balance_ << "\n";
});
}
~bank_account()
{
pool_.join();
}
};
int main()
{
bank_account acct;
acct.deposit(20);
acct.withdraw(10);
acct.print_balance();
}


@@ -0,0 +1,54 @@
#include <boost/asio/post.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/asio/use_future.hpp>
#include <iostream>
using boost::asio::post;
using boost::asio::thread_pool;
using boost::asio::use_future;
// Traditional active object pattern.
// Member functions block until operation is finished.
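// Each member function posts its work to the single-threaded pool with
// use_future and then blocks on the returned std::future, so the caller sees
// the operation's result (or completion) before continuing.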
class bank_account
{
int balance_ = 0;
mutable thread_pool pool_{1};
public:
void deposit(int amount)
{
post(pool_,
use_future([=]
{
balance_ += amount;
})).get();
}
void withdraw(int amount)
{
post(pool_,
use_future([=]
{
if (balance_ >= amount)
balance_ -= amount;
})).get();
}
int balance() const
{
return post(pool_,
use_future([=]
{
return balance_;
})).get();
}
};
int main()
{
bank_account acct;
acct.deposit(20);
acct.withdraw(10);
std::cout << "balance = " << acct.balance() << "\n";
}


@@ -0,0 +1,327 @@
#include <boost/asio/dispatch.hpp>
#include <boost/asio/execution_context.hpp>
#include <boost/asio/thread_pool.hpp>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
using boost::asio::dispatch;
using boost::asio::execution_context;
using boost::asio::thread_pool;
// A fixed-size thread pool used to implement fork/join semantics. Functions
// are scheduled using a simple FIFO queue. Implementing work stealing, or
// using a queue based on atomic operations, are left as tasks for the reader.
class fork_join_pool : public execution_context
{
public:
// The constructor starts a thread pool with the specified number of threads.
// Note that the thread_count is not a fixed limit on the pool's concurrency.
// Additional threads may temporarily be added to the pool if they join a
// fork_executor.
explicit fork_join_pool(
std::size_t thread_count = std::thread::hardware_concurrency() * 2)
: use_count_(1),
threads_(thread_count)
{
try
{
// Ask each thread in the pool to dequeue and execute functions until
// it is time to shut down, i.e. the use count is zero.
for (thread_count_ = 0; thread_count_ < thread_count; ++thread_count_)
{
dispatch(threads_, [&]
{
std::unique_lock<std::mutex> lock(mutex_);
while (use_count_ > 0)
if (!execute_next(lock))
condition_.wait(lock);
});
}
}
catch (...)
{
stop_threads();
threads_.join();
throw;
}
}
// The destructor waits for the pool to finish executing functions.
~fork_join_pool()
{
stop_threads();
threads_.join();
}
private:
friend class fork_executor;
// The base for all functions that are queued in the pool.
struct function_base
{
std::shared_ptr<std::size_t> work_count_;
void (*execute_)(std::shared_ptr<function_base>& p);
};
// Execute the next function from the queue, if any. Returns true if a
// function was executed, and false if the queue was empty.
bool execute_next(std::unique_lock<std::mutex>& lock)
{
if (queue_.empty())
return false;
auto p(queue_.front());
queue_.pop();
lock.unlock();
execute(lock, p);
return true;
}
// Execute a function and decrement the outstanding work.
void execute(std::unique_lock<std::mutex>& lock,
std::shared_ptr<function_base>& p)
{
std::shared_ptr<std::size_t> work_count(std::move(p->work_count_));
try
{
p->execute_(p);
lock.lock();
do_work_finished(work_count);
}
catch (...)
{
lock.lock();
do_work_finished(work_count);
throw;
}
}
// Increment outstanding work.
void do_work_started(const std::shared_ptr<std::size_t>& work_count) noexcept
{
if (++(*work_count) == 1)
++use_count_;
}
// Decrement outstanding work. Notify waiting threads if we run out.
void do_work_finished(const std::shared_ptr<std::size_t>& work_count) noexcept
{
if (--(*work_count) == 0)
{
--use_count_;
condition_.notify_all();
}
}
// Dispatch a function, executing it immediately if the queue is already
// loaded. Otherwise adds the function to the queue and wakes a thread.
void do_dispatch(std::shared_ptr<function_base> p,
const std::shared_ptr<std::size_t>& work_count)
{
std::unique_lock<std::mutex> lock(mutex_);
if (queue_.size() > thread_count_ * 16)
{
do_work_started(work_count);
lock.unlock();
execute(lock, p);
}
else
{
queue_.push(p);
do_work_started(work_count);
condition_.notify_one();
}
}
// Add a function to the queue and wake a thread.
void do_post(std::shared_ptr<function_base> p,
const std::shared_ptr<std::size_t>& work_count)
{
std::lock_guard<std::mutex> lock(mutex_);
queue_.push(p);
do_work_started(work_count);
condition_.notify_one();
}
// Ask all threads to shut down.
void stop_threads()
{
std::lock_guard<std::mutex> lock(mutex_);
--use_count_;
condition_.notify_all();
}
std::mutex mutex_;
std::condition_variable condition_;
std::queue<std::shared_ptr<function_base>> queue_;
std::size_t use_count_;
std::size_t thread_count_;
thread_pool threads_;
};
// A class that satisfies the Executor requirements. Every function or piece of
// work associated with a fork_executor is part of a single, joinable group.
class fork_executor
{
public:
fork_executor(fork_join_pool& ctx)
: context_(ctx),
work_count_(std::make_shared<std::size_t>(0))
{
}
fork_join_pool& context() const noexcept
{
return context_;
}
void on_work_started() const noexcept
{
std::lock_guard<std::mutex> lock(context_.mutex_);
context_.do_work_started(work_count_);
}
void on_work_finished() const noexcept
{
std::lock_guard<std::mutex> lock(context_.mutex_);
context_.do_work_finished(work_count_);
}
template <class Func, class Alloc>
void dispatch(Func&& f, const Alloc& a) const
{
auto p(std::allocate_shared<function<Func>>(
typename std::allocator_traits<Alloc>::template rebind_alloc<char>(a),
std::move(f), work_count_));
context_.do_dispatch(p, work_count_);
}
template <class Func, class Alloc>
void post(Func f, const Alloc& a) const
{
auto p(std::allocate_shared<function<Func>>(
typename std::allocator_traits<Alloc>::template rebind_alloc<char>(a),
std::move(f), work_count_));
context_.do_post(p, work_count_);
}
template <class Func, class Alloc>
void defer(Func&& f, const Alloc& a) const
{
post(std::forward<Func>(f), a);
}
friend bool operator==(const fork_executor& a,
const fork_executor& b) noexcept
{
return a.work_count_ == b.work_count_;
}
friend bool operator!=(const fork_executor& a,
const fork_executor& b) noexcept
{
return a.work_count_ != b.work_count_;
}
// Block until all work associated with the executor is complete. While it is
// waiting, the thread may be borrowed to execute functions from the queue.
void join() const
{
std::unique_lock<std::mutex> lock(context_.mutex_);
while (*work_count_ > 0)
if (!context_.execute_next(lock))
context_.condition_.wait(lock);
}
private:
template <class Func>
struct function : fork_join_pool::function_base
{
explicit function(Func f, const std::shared_ptr<std::size_t>& w)
: function_(std::move(f))
{
work_count_ = w;
execute_ = [](std::shared_ptr<fork_join_pool::function_base>& p)
{
Func tmp(std::move(static_cast<function*>(p.get())->function_));
p.reset();
tmp();
};
}
Func function_;
};
fork_join_pool& context_;
std::shared_ptr<std::size_t> work_count_;
};
// Helper class to automatically join a fork_executor when exiting a scope.
class join_guard
{
public:
explicit join_guard(const fork_executor& ex) : ex_(ex) {}
join_guard(const join_guard&) = delete;
join_guard(join_guard&&) = delete;
~join_guard() { ex_.join(); }
private:
fork_executor ex_;
};
//------------------------------------------------------------------------------
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>
fork_join_pool pool;
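// Sort a range by recursively forking the two halves onto the pool and
// merging the results; ranges of 32768 elements or fewer are sorted directly.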
template <class Iterator>
void fork_join_sort(Iterator begin, Iterator end)
{
std::size_t n = end - begin;
if (n > 32768)
{
{
fork_executor fork(pool);
join_guard join(fork);
dispatch(fork, [=]{ fork_join_sort(begin, begin + n / 2); });
dispatch(fork, [=]{ fork_join_sort(begin + n / 2, end); });
}
std::inplace_merge(begin, begin + n / 2, end);
}
else
{
std::sort(begin, end);
}
}
int main(int argc, char* argv[])
{
if (argc != 2)
{
std::cerr << "Usage: fork_join <size>\n";
return 1;
}
std::vector<double> vec(std::atoll(argv[1]));
std::iota(vec.begin(), vec.end(), 0);
std::random_device rd;
std::mt19937 g(rd());
std::shuffle(vec.begin(), vec.end(), g);
std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
fork_join_sort(vec.begin(), vec.end());
std::chrono::steady_clock::duration elapsed = std::chrono::steady_clock::now() - start;
std::cout << "sort took ";
std::cout << std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
std::cout << " microseconds" << std::endl;
}


@@ -0,0 +1,298 @@
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/execution_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/system_executor.hpp>
#include <boost/asio/use_future.hpp>
#include <condition_variable>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
using boost::asio::execution_context;
using boost::asio::executor_binder;
using boost::asio::get_associated_executor;
using boost::asio::post;
using boost::asio::system_executor;
using boost::asio::use_future;
using boost::asio::use_service;
// An executor that launches a new thread for each function submitted to it.
// This class satisfies the Executor requirements.
class thread_executor
{
private:
// Service to track all threads started through a thread_executor.
class thread_bag : public execution_context::service
{
public:
typedef thread_bag key_type;
explicit thread_bag(execution_context& ctx)
: execution_context::service(ctx)
{
}
void add_thread(std::thread&& t)
{
std::unique_lock<std::mutex> lock(mutex_);
threads_.push_back(std::move(t));
}
private:
virtual void shutdown()
{
for (auto& t : threads_)
t.join();
}
std::mutex mutex_;
std::vector<std::thread> threads_;
};
public:
execution_context& context() const noexcept
{
return system_executor().context();
}
void on_work_started() const noexcept
{
// This executor doesn't count work.
}
void on_work_finished() const noexcept
{
// This executor doesn't count work.
}
template <class Func, class Alloc>
void dispatch(Func&& f, const Alloc& a) const
{
post(std::forward<Func>(f), a);
}
template <class Func, class Alloc>
void post(Func f, const Alloc&) const
{
thread_bag& bag = use_service<thread_bag>(context());
bag.add_thread(std::thread(std::move(f)));
}
template <class Func, class Alloc>
void defer(Func&& f, const Alloc& a) const
{
post(std::forward<Func>(f), a);
}
friend bool operator==(const thread_executor&,
const thread_executor&) noexcept
{
return true;
}
friend bool operator!=(const thread_executor&,
const thread_executor&) noexcept
{
return false;
}
};
// Base class for all thread-safe queue implementations.
class queue_impl_base
{
template <class> friend class queue_front;
template <class> friend class queue_back;
std::mutex mutex_;
std::condition_variable condition_;
bool stop_ = false;
};
// Underlying implementation of a thread-safe queue, shared between the
// queue_front and queue_back classes.
template <class T>
class queue_impl : public queue_impl_base
{
template <class> friend class queue_front;
template <class> friend class queue_back;
std::queue<T> queue_;
};
// The front end of a queue between consecutive pipeline stages.
template <class T>
class queue_front
{
public:
typedef T value_type;
explicit queue_front(std::shared_ptr<queue_impl<T>> impl)
: impl_(impl)
{
}
void push(T t)
{
std::unique_lock<std::mutex> lock(impl_->mutex_);
impl_->queue_.push(std::move(t));
impl_->condition_.notify_one();
}
void stop()
{
std::unique_lock<std::mutex> lock(impl_->mutex_);
impl_->stop_ = true;
impl_->condition_.notify_one();
}
private:
std::shared_ptr<queue_impl<T>> impl_;
};
// The back end of a queue between consecutive pipeline stages.
template <class T>
class queue_back
{
public:
typedef T value_type;
explicit queue_back(std::shared_ptr<queue_impl<T>> impl)
: impl_(impl)
{
}
bool pop(T& t)
{
std::unique_lock<std::mutex> lock(impl_->mutex_);
while (impl_->queue_.empty() && !impl_->stop_)
impl_->condition_.wait(lock);
if (!impl_->queue_.empty())
{
t = impl_->queue_.front();
impl_->queue_.pop();
return true;
}
return false;
}
private:
std::shared_ptr<queue_impl<T>> impl_;
};
// Launch the last stage in a pipeline.
template <class T, class F>
std::future<void> pipeline(queue_back<T> in, F f)
{
// Get the function's associated executor, defaulting to thread_executor.
auto ex = get_associated_executor(f, thread_executor());
// Run the function, and as we're the last stage return a future so that the
// caller can wait for the pipeline to finish.
return post(ex, use_future([in, f]() mutable { f(in); }));
}
// Launch an intermediate stage in a pipeline.
template <class T, class F, class... Tail>
std::future<void> pipeline(queue_back<T> in, F f, Tail... t)
{
// Determine the output queue type.
typedef typename executor_binder<F, thread_executor>::second_argument_type::value_type output_value_type;
// Create the output queue and its implementation.
auto out_impl = std::make_shared<queue_impl<output_value_type>>();
queue_front<output_value_type> out(out_impl);
queue_back<output_value_type> next_in(out_impl);
// Get the function's associated executor, defaulting to thread_executor.
auto ex = get_associated_executor(f, thread_executor());
// Run the function.
post(ex, [in, out, f]() mutable
{
f(in, out);
out.stop();
});
// Launch the rest of the pipeline.
return pipeline(next_in, std::move(t)...);
}
// Launch the first stage in a pipeline.
template <class F, class... Tail>
std::future<void> pipeline(F f, Tail... t)
{
// Determine the output queue type.
typedef typename executor_binder<F, thread_executor>::argument_type::value_type output_value_type;
// Create the output queue and its implementation.
auto out_impl = std::make_shared<queue_impl<output_value_type>>();
queue_front<output_value_type> out(out_impl);
queue_back<output_value_type> next_in(out_impl);
// Get the function's associated executor, defaulting to thread_executor.
auto ex = get_associated_executor(f, thread_executor());
// Run the function.
post(ex, [out, f]() mutable
{
f(out);
out.stop();
});
// Launch the rest of the pipeline.
return pipeline(next_in, std::move(t)...);
}
//------------------------------------------------------------------------------
#include <boost/asio/thread_pool.hpp>
#include <cctype>
#include <iostream>
#include <string>
using boost::asio::bind_executor;
using boost::asio::thread_pool;
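// The sample pipeline below reads lines from standard input, keeps only lines
// longer than five characters, converts them to upper case on the thread
// pool, and prints each remaining line with a running count.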
void reader(queue_front<std::string> out)
{
std::string line;
while (std::getline(std::cin, line))
out.push(line);
}
void filter(queue_back<std::string> in, queue_front<std::string> out)
{
std::string line;
while (in.pop(line))
if (line.length() > 5)
out.push(line);
}
void upper(queue_back<std::string> in, queue_front<std::string> out)
{
std::string line;
while (in.pop(line))
{
std::string new_line;
for (char c : line)
new_line.push_back(std::toupper(static_cast<unsigned char>(c)));
out.push(new_line);
}
}
void writer(queue_back<std::string> in)
{
std::size_t count = 0;
std::string line;
while (in.pop(line))
std::cout << count++ << ": " << line << std::endl;
}
int main()
{
thread_pool pool;
auto f = pipeline(reader, filter, bind_executor(pool, upper), writer);
f.wait();
}


@@ -0,0 +1,168 @@
#include <boost/asio/dispatch.hpp>
#include <boost/asio/execution_context.hpp>
#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <vector>
using boost::asio::dispatch;
using boost::asio::execution_context;
class priority_scheduler : public execution_context
{
public:
// A class that satisfies the Executor requirements.
class executor_type
{
public:
executor_type(priority_scheduler& ctx, int pri) noexcept
: context_(ctx), priority_(pri)
{
}
priority_scheduler& context() const noexcept
{
return context_;
}
void on_work_started() const noexcept
{
// This executor doesn't count work. Instead, the scheduler simply runs
// until explicitly stopped.
}
void on_work_finished() const noexcept
{
// This executor doesn't count work. Instead, the scheduler simply runs
// until explicitly stopped.
}
template <class Func, class Alloc>
void dispatch(Func&& f, const Alloc& a) const
{
post(std::forward<Func>(f), a);
}
template <class Func, class Alloc>
void post(Func f, const Alloc& a) const
{
auto p(std::allocate_shared<item<Func>>(
typename std::allocator_traits<
Alloc>::template rebind_alloc<char>(a),
priority_, std::move(f)));
std::lock_guard<std::mutex> lock(context_.mutex_);
context_.queue_.push(p);
context_.condition_.notify_one();
}
template <class Func, class Alloc>
void defer(Func&& f, const Alloc& a) const
{
post(std::forward<Func>(f), a);
}
friend bool operator==(const executor_type& a,
const executor_type& b) noexcept
{
return &a.context_ == &b.context_;
}
friend bool operator!=(const executor_type& a,
const executor_type& b) noexcept
{
return &a.context_ != &b.context_;
}
private:
priority_scheduler& context_;
int priority_;
};
executor_type get_executor(int pri = 0) noexcept
{
return executor_type(*this, pri);
}
void run()
{
std::unique_lock<std::mutex> lock(mutex_);
for (;;)
{
condition_.wait(lock, [&]{ return stopped_ || !queue_.empty(); });
if (stopped_)
return;
auto p(queue_.top());
queue_.pop();
lock.unlock();
p->execute_(p);
lock.lock();
}
}
void stop()
{
std::lock_guard<std::mutex> lock(mutex_);
stopped_ = true;
condition_.notify_all();
}
private:
struct item_base
{
int priority_;
void (*execute_)(std::shared_ptr<item_base>&);
};
template <class Func>
struct item : item_base
{
item(int pri, Func f) : function_(std::move(f))
{
priority_ = pri;
execute_ = [](std::shared_ptr<item_base>& p)
{
Func tmp(std::move(static_cast<item*>(p.get())->function_));
p.reset();
tmp();
};
}
Func function_;
};
struct item_comp
{
bool operator()(
const std::shared_ptr<item_base>& a,
const std::shared_ptr<item_base>& b)
{
return a->priority_ < b->priority_;
}
};
std::mutex mutex_;
std::condition_variable condition_;
std::priority_queue<
std::shared_ptr<item_base>,
std::vector<std::shared_ptr<item_base>>,
item_comp> queue_;
bool stopped_ = false;
};
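// The demo below queues work at three priorities and then runs the scheduler
// on the main thread: the priority-2 items ("3", "33", "333") run first,
// followed by priority 1, then priority 0, and finally the priority -1 item
// that stops the scheduler.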
int main()
{
priority_scheduler sched;
auto low = sched.get_executor(0);
auto med = sched.get_executor(1);
auto high = sched.get_executor(2);
dispatch(low, []{ std::cout << "1\n"; });
dispatch(low, []{ std::cout << "11\n"; });
dispatch(med, []{ std::cout << "2\n"; });
dispatch(med, []{ std::cout << "22\n"; });
dispatch(high, []{ std::cout << "3\n"; });
dispatch(high, []{ std::cout << "33\n"; });
dispatch(high, []{ std::cout << "333\n"; });
dispatch(sched.get_executor(-1), [&]{ sched.stop(); });
sched.run();
}