/*  $Id: thread_pool.cpp 99785 2023-05-10 17:50:50Z vasilche $
* ===========================================================================
*
*                            PUBLIC DOMAIN NOTICE
*               National Center for Biotechnology Information
*
*  This software/database is a "United States Government Work" under the
*  terms of the United States Copyright Act.  It was written as part of
*  the author's official duties as a United States Government employee and
*  thus cannot be copyrighted.  This software/database is freely available
*  to the public for use. The National Library of Medicine and the U.S.
*  Government have not placed any restriction on its use or reproduction.
*
*  Although all reasonable efforts have been taken to ensure the accuracy
*  and reliability of the software and data, the NLM and the U.S.
*  Government do not and cannot warrant the performance or results that
*  may be obtained by using this software or data. The NLM and the U.S.
*  Government disclaim all warranties, express or implied, including
*  warranties of performance, merchantability or fitness for any particular
*  purpose.
*
*  Please cite the author in any work or product based on this material.
*
* ===========================================================================
*
* Author:  Pavel Ivanov, Denis Vakatov
*
* File Description:
*   Pool of threads.
*/

#include <ncbi_pch.hpp>
#include <util/thread_pool.hpp>
#include <util/thread_pool_ctrl.hpp>
#include <util/sync_queue.hpp>
#include <util/error_codes.hpp>

#define NCBI_USE_ERRCODE_X  Util_Thread

BEGIN_NCBI_SCOPE


class CThreadPool_Guard;
class CThreadPool_ServiceThread;


/// Functor to compare tasks by priority
struct SThreadPool_TaskCompare {
    bool operator() (const CRef<CThreadPool_Task>& left,
                     const CRef<CThreadPool_Task>& right) const
    {
        return left->GetPriority() < right->GetPriority();
    }
};
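
// Illustration only (CMyTask is a hypothetical user class deriving from
// CThreadPool_Task, not part of this file): with this functor a multiset
// keeps tasks ordered by ascending priority value, e.g.
//
//     CRef<CThreadPool_Task> low (new CMyTask(/* priority = */ 1));
//     CRef<CThreadPool_Task> high(new CMyTask(/* priority = */ 5));
//     multiset<CRef<CThreadPool_Task>, SThreadPool_TaskCompare> order;
//     order.insert(high);
//     order.insert(low);
//     // *order.begin() is "low" (priority 1), *order.rbegin() is "high"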


/// Real implementation of all ThreadPool functions
class CThreadPool_Impl : public CObject
{
public:
    typedef CThreadPool::TExclusiveFlags  TExclusiveFlags;

    /// Convert pointer to CThreadPool object into pointer to CThreadPool_Impl
    /// object. Can be done only here to avoid excessive friendship to
    /// CThreadPool class.
    static CThreadPool_Impl* s_GetImplPointer(CThreadPool* pool);

    /// Call x_SetTaskStatus() for the given task.
    /// Method introduced to avoid excessive friendship to CThreadPool_Task
    /// class.
    ///
    /// @sa CThreadPool_Task::x_SetTaskStatus()
    static void sx_SetTaskStatus(CThreadPool_Task*          task,
                                 CThreadPool_Task::EStatus  status);

    /// Call x_RequestToCancel() for the given task.
    /// Method introduced to avoid excessive friendship to CThreadPool_Task
    /// class.
    ///
    /// @sa CThreadPool_Task::x_RequestToCancel()
    static void sx_RequestToCancel(CThreadPool_Task* task);


    /// Constructor with default controller
    /// @param pool_intf
    ///   ThreadPool interface object attached to this implementation
    ///
    /// @sa CThreadPool::CThreadPool()
    CThreadPool_Impl(CThreadPool*      pool_intf,
                     unsigned int      queue_size,
                     unsigned int      max_threads,
                     unsigned int      min_threads,
                     CThread::TRunMode threads_mode = CThread::fRunDefault);

    /// Constructor with explicitly given controller
    /// @param pool_intf
    ///   ThreadPool interface object attached to this implementation
    ///
    /// @sa CThreadPool::CThreadPool()
    CThreadPool_Impl(CThreadPool*        pool_intf,
                     unsigned int        queue_size,
                     CThreadPool_Controller* controller,
                     CThread::TRunMode   threads_mode = CThread::fRunDefault);

    /// Get pointer to ThreadPool interface object
    CThreadPool* GetPoolInterface(void) const;

    /// Set destroy timeout for the pool
    ///
    /// @sa CThreadPool::SetDestroyTimeout()
    void SetDestroyTimeout(const CTimeSpan& timeout);

    /// Get destroy timeout for the pool
    ///
    /// @sa CThreadPool::GetDestroyTimeout()
    const CTimeSpan& GetDestroyTimeout(void) const;

    /// Destroy reference to this object
    /// The method is called when the CThreadPool object is destroyed, which
    /// means that the implementation can be destroyed too once no references
    /// to it are left.
    void DestroyReference(void);

    /// Get main pool mutex
    ///
    /// @sa CThreadPool::GetMainPoolMutex()
    CMutex& GetMainPoolMutex(void);

    /// Add task to the pool
    ///
    /// @sa CThreadPool::AddTask()
    void AddTask(CThreadPool_Task* task, const CTimeSpan* timeout);

    /// Request to cancel the task
    ///
    /// @sa CThreadPool::CancelTask()
    void CancelTask(CThreadPool_Task* task);

    /// Cancel the selected groups of tasks in the pool
    ///
    /// @sa CThreadPool::CancelTasks()
    void CancelTasks(TExclusiveFlags tasks_group);

    /// Add the task for exclusive execution in the pool
    ///
    /// @sa CThreadPool::RequestExclusiveExecution()
    void RequestExclusiveExecution(CThreadPool_Task*  task,
                                   TExclusiveFlags    flags);

    /// Launch new threads in pool
    /// @param count
    ///   Number of threads to launch
    void LaunchThreads(unsigned int count);

    /// Finish threads in pool
    /// First stop all idle threads, then stop busy threads without
    /// canceling the currently executing tasks.
    /// @param count
    ///   Number of threads to finish
    void FinishThreads(unsigned int count);

    /// Get number of threads running in the pool
    unsigned int GetThreadsCount(void) const;

    /// Mark thread as idle or non-idle
    /// @param thread
    ///   Thread to mark
    /// @param is_idle
    ///   Whether the thread should be marked as idle
    bool SetThreadIdle(CThreadPool_ThreadImpl* thread, bool is_idle);

    /// Callback from working thread when it finished its Main() method
    void ThreadStopped(CThreadPool_ThreadImpl* thread);

    /// Callback when some thread changed its idleness or finished
    /// (including service thread)
    void ThreadStateChanged(void);

    /// Get next task from queue if there is one
    /// If the queue is empty then return NULL.
    CRef<CThreadPool_Task> TryGetNextTask(void);

    /// Callback from thread when it is starting to execute a task
    void TaskStarting(void);

    /// Callback from thread when it has finished executing a task
    void TaskFinished(void);

    /// Get the number of tasks currently waiting in queue
    unsigned int GetQueuedTasksCount(void) const;

    /// Get the number of currently executing tasks
    unsigned int GetExecutingTasksCount(void) const;

    /// Type for storing information about exclusive task launching
    struct SExclusiveTaskInfo {
        TExclusiveFlags         flags;
        CRef<CThreadPool_Task>  task;
        SExclusiveTaskInfo(TExclusiveFlags f, CRef<CThreadPool_Task> t)
            : flags(f), task(t) {}
    };

    /// Get the next exclusive task to execute
    SExclusiveTaskInfo TryGetExclusiveTask(void);

    /// Request suspension of the pool
    /// @param flags
    ///   Parameters for necessary exclusive execution environment
    void RequestSuspend(TExclusiveFlags flags);

    /// Resume the pool operation after exclusive task execution
    void ResumeWork(void);

    /// Check if the pool is suspended for exclusive execution
    bool IsSuspended(void) const;

    /// Check if it is already allowed to execute exclusive task
    bool CanDoExclusiveTask(void) const;

    /// Abort the pool operation
    ///
    /// @sa CThreadPool::Abort()
    void Abort(const CTimeSpan* timeout);

    /// Check if the pool is already aborted
    bool IsAborted(void) const;

    /// Finish all current threads and replace them with new ones
    ///
    /// @sa CThreadPool::FlushThreads()
    void FlushThreads(CThreadPool::EFlushType flush_type);

    /// Call the CThreadPool_Controller::HandleEvent() method of the pool
    /// controller with the given event type. If ThreadPool is already aborted
    /// and controller is reset then do nothing.
    void CallController(CThreadPool_Controller::EEvent event);

    /// Schedule running of CThreadPool_Controller::HandleEvent() with eOther
    /// event type
    void CallControllerOther(void);

    /// Call the CThreadPool_Controller::GetSafeSleepTime() method of the pool
    /// controller. If the ThreadPool is already aborted and the controller is
    /// reset then return a zero time period.
    CTimeSpan GetSafeSleepTime(void) const;

    /// Mark that initialization of the interface was finished
    void SetInterfaceStarted(void);


private:
    /// Type of queue used for storing tasks
    typedef CSyncQueue< CRef<CThreadPool_Task>,
                        CSyncQueue_multiset< CRef<CThreadPool_Task>,
                                             SThreadPool_TaskCompare > >
            TQueue;
    /// Type of queue used for storing information about exclusive tasks
    typedef CSyncQueue<SExclusiveTaskInfo>                 TExclusiveQueue;
    /// Type of list of all pooled threads
    typedef set<CThreadPool_ThreadImpl*> TThreadsList;


    /// Prohibit copying and assigning
    CThreadPool_Impl(const CThreadPool_Impl&);
    CThreadPool_Impl& operator= (const CThreadPool_Impl&);

    /// Transform size of queue given in constructor to the size passed to
    /// CSyncQueue constructor.
    /// Method can be called only from constructor because it initializes
    /// value of m_IsQueueAllowed member variable.
    unsigned int x_GetQueueSize(unsigned int queue_size);

    /// Initialization of all class member variables that can be initialized
    /// outside of constructor
    /// @param pool_intf
    ///   ThreadPool interface object attached to this implementation
    /// @param controller
    ///   Controller for the pool
    void x_Init(CThreadPool*            pool_intf,
                CThreadPool_Controller* controller,
                CThread::TRunMode       threads_mode);

    /// Destructor. Will be called from CRef
    ~CThreadPool_Impl(void);

    /// Delete task from the queue
    /// If the task does not exist in the queue then nothing is done.
    void x_RemoveTaskFromQueue(const CThreadPool_Task* task);

    /// Cancel all tasks waiting in the queue
    void x_CancelQueuedTasks(void);

    /// Cancel all currently executing tasks
    void x_CancelExecutingTasks(void);

    /// Type of some simple predicate
    ///
    /// @sa x_WaitForPredicate
    typedef bool (CThreadPool_Impl::*TWaitPredicate)(void) const;

    /// Check if adding new tasks to the pool is prohibited
    bool x_NoNewTaskAllowed(void) const;

    /// Check if a new task can be added to the pool when queuing is disabled
    bool x_CanAddImmediateTask(void) const;

    /// Check if all threads in pool finished their work
    bool x_HasNoThreads(void) const;

    /// Wait for some predicate to be true
    /// @param wait_func
    ///   Predicate to wait for
    /// @param pool_guard
    ///   Guardian that locks the main pool mutex at the time of the method
    ///   call and that has to be unlocked for the duration of the waiting
    /// @param wait_sema
    ///   Semaphore which will be posted when the predicate becomes true
    /// @param timeout
    ///   Maximum amount of time to wait
    /// @param timer
    ///   Timer for measuring elapsed time. The method assumes that the timer
    ///   was started at the moment from which the timeout should be calculated.
    bool x_WaitForPredicate(TWaitPredicate      wait_func,
                            CThreadPool_Guard*  pool_guard,
                            CSemaphore*         wait_sema,
                            const CTimeSpan*    timeout,
                            const CStopWatch*   timer);


private:
    /// ThreadPool interface object attached to this implementation
    CThreadPool*                     m_Interface;
    /// Reference to this pool to prevent it from being destroyed earlier
    /// than we allow
    CRef<CThreadPool_Impl>           m_SelfRef;
    /// Timeout to wait for all threads to finish before the ThreadPool
    /// interface object can be destroyed
    CTimeSpan                        m_DestroyTimeout;
    /// Queue for storing tasks
    TQueue                           m_Queue;
    /// Mutex for guarding all changes in the pool, its threads and controller
    CMutex                           m_MainPoolMutex;
    /// Semaphore for waiting for available threads to process task when
    /// queuing is disabled.
    CSemaphore                       m_RoomWait;
    /// Controller managing count of threads in pool
    CRef<CThreadPool_Controller>     m_Controller;
    /// List of all idle threads
    TThreadsList                     m_IdleThreads;
    /// List of all threads currently executing some tasks
    TThreadsList                     m_WorkingThreads;
    /// Running mode of all threads
    CThread::TRunMode                m_ThreadsMode;
    /// Total number of threads
    /// Maintained separately so that thread start and stop events are
    /// reflected quickly and accurately
    CAtomicCounter                   m_ThreadsCount;
    /// Number of tasks executing now
    /// Maintained separately so that task start and finish events are
    /// reflected quickly and accurately
    CAtomicCounter                   m_ExecutingTasks;
    /// Total number of tasks acquired by pool
    /// Includes queued tasks and executing (but not exclusive!) tasks.
    /// Introduced to keep changes of this number atomic
    CAtomicCounter                   m_TotalTasks;
    /// Flag distinguishing the special queuing mode:
    /// FALSE - queue_size == 0, TRUE - queue_size > 0
    bool                             m_IsQueueAllowed;
    /// Whether the pool is already aborted
    atomic<bool>                     m_Aborted;
    /// Semaphore for waiting for threads finishing in Abort() method
    ///
    /// @sa Abort()
    CSemaphore                       m_AbortWait;
    /// Whether the pool is suspended for exclusive task execution.
    /// Thread Checker can complain that access to this variable is not
    /// guarded by a mutex anywhere. That is okay because special care is
    /// taken to make any race a matter of timing - suspension will happen
    /// properly in any case. Also everything is written with the assumption
    /// that there are no other threads (besides this very thread pool) that
    /// could call any methods here.
    atomic<bool>                     m_Suspended;
    /// Requested requirements for the exclusive execution environment
    volatile TExclusiveFlags         m_SuspendFlags;
    /// Flag indicating whether a flush of threads was requested after an
    /// exclusive task was added but before it started executing.
    volatile bool                    m_FlushRequested;
    /// Thread for execution of exclusive tasks and passing of events
    /// to the controller
    CRef<CThreadPool_ServiceThread>  m_ServiceThread;
    /// Queue for information about exclusive tasks
    TExclusiveQueue                  m_ExclusiveQueue;
};
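
// A minimal client-side sketch of the functionality implemented below (the
// public API lives in util/thread_pool.hpp; CMyTask is a hypothetical user
// class, not part of this file):
//
//     class CMyTask : public CThreadPool_Task
//     {
//     public:
//         virtual EStatus Execute(void)
//         {
//             // ... do the work, checking IsCancelRequested() periodically
//             return eCompleted;
//         }
//     };
//
//     CThreadPool pool(/* queue_size  = */ 100,
//                      /* max_threads = */ 10,
//                      /* min_threads = */ 2);
//     pool.AddTask(new CMyTask());   // the pool holds a CRef to the task
//     // ...
//     pool.Abort();                  // cancel everything and stop the threads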



/// Real implementation of all CThreadPool_Thread functions
class CThreadPool_ThreadImpl
{
public:
    /// Convert pointer to CThreadPool_Thread object into pointer
    /// to CThreadPool_ThreadImpl object. Can be done only here to avoid
    /// excessive friendship to CThreadPool_Thread class.
    static CThreadPool_ThreadImpl*
    s_GetImplPointer(CThreadPool_Thread* thread);

    /// Create new CThreadPool_Thread object
    /// Method introduced to avoid excessive friendship to CThreadPool_Thread
    /// class.
    ///
    /// @sa CThreadPool_Thread::CThreadPool_Thread()
    static CThreadPool_Thread* s_CreateThread(CThreadPool* pool);

    /// Constructor
    /// @param thread_intf
    ///   ThreadPool_Thread interface object attached to this implementation
    /// @param pool
    ///   Pool implementation owning this thread
    CThreadPool_ThreadImpl(CThreadPool_Thread* thread_intf,
                           CThreadPool_Impl*   pool);

    /// Destructor
    /// Called directly from CThreadPool destructor
    ~CThreadPool_ThreadImpl(void);

    /// Get ThreadPool interface object owning this thread
    ///
    /// @sa CThreadPool_Thread::GetPool()
    CThreadPool* GetPool(void) const;

    /// Request this thread to finish its operation.
    /// It renders the thread unusable and eventually ready for destruction
    /// (as soon as its current task is finished and there are no CRefs to
    /// this thread left).
    void RequestToFinish(void);

    /// Whether finishing of this thread is already in progress
    bool IsFinishing(void) const;

    /// Wake up the thread from idle state
    ///
    /// @sa x_Idle
    void WakeUp(void);

    /// Get task currently executing in the thread
    /// May be NULL if the thread is idle or is in the middle of switching
    /// to another task
    ///
    /// @sa CThreadPool_Thread::GetCurrentTask()
    CRef<CThreadPool_Task> GetCurrentTask(void) const;

    /// Request to cancel current task execution
    void CancelCurrentTask(void);

    /// Implementation of thread Main() method
    ///
    /// @sa CThreadPool_Thread::Main()
    void Main(void);

    /// Implementation of thread OnExit() method
    ///
    /// @sa CThreadPool_Thread::OnExit()
    void OnExit(void);

private:
    /// Prohibit copying and assigning
    CThreadPool_ThreadImpl(const CThreadPool_ThreadImpl&);
    CThreadPool_ThreadImpl& operator= (const CThreadPool_ThreadImpl&);

    /// Suspend until the wake up signal.
    ///
    /// @sa WakeUp()
    void x_Idle(void);

    /// Mark the thread idle or non-idle
    bool x_SetIdleState(bool is_idle);

    /// Do finalizing when task finished its execution
    /// @param status
    ///   Status that the task must get
    void x_TaskFinished(CThreadPool_Task::EStatus status);


    /// ThreadPool_Thread interface object attached to this implementation
    CThreadPool_Thread*          m_Interface;
    /// Pool running the thread
    CRef<CThreadPool_Impl>       m_Pool;
    /// Whether the thread has already been asked to finish
    atomic<bool>                 m_Finishing;
    /// Whether cancellation of the currently executing task has been requested
    atomic<bool>                 m_CancelRequested;
    /// Idleness of the thread
    bool                         m_IsIdle;
    /// Task currently executing in the thread
    CRef<CThreadPool_Task>       m_CurrentTask;
    /// Semaphore for waking up from idle waiting
    CSemaphore                   m_IdleTrigger;
    /// General-use mutex for very (very!) trivial ops
    mutable CFastMutex           m_FastMutex;
};



/// Thread used in pool for different internal needs: execution of exclusive
/// tasks and passing events to controller
class CThreadPool_ServiceThread : public CThread
{
public:
    /// Constructor
    /// @param pool
    ///   ThreadPool owning this thread
    CThreadPool_ServiceThread(CThreadPool_Impl* pool);

    /// Wake up from idle waiting or from waiting for the pool to prepare
    /// the exclusive execution environment
    void WakeUp(void);

    /// Request finishing of the thread
    void RequestToFinish(void);

    /// Check if this thread has already finished
    bool IsFinished(void);

    /// Tell the thread that controller should handle eOther event
    ///
    /// @sa CThreadPool_Controller::HandleEvent()
    void NeedCallController(void);

protected:
    /// Destructor. Will be called from CRef
    virtual ~CThreadPool_ServiceThread(void);

private:
    /// Main thread execution
    virtual void* Main(void);

    /// Do "idle" work when thread is not busy executing exclusive tasks
    void x_Idle(void);

    /// Pool owning this thread
    CRef<CThreadPool_Impl>  m_Pool;
    /// Semaphore for idle sleeping
    CSemaphore              m_IdleTrigger;
    /// If finishing of the thread is already requested
    atomic<bool>            m_Finishing;
    /// If the thread has already finished its Main() method
    atomic<bool>            m_Finished;
    /// Currently executing exclusive task
    CRef<CThreadPool_Task>  m_CurrentTask;
    /// Flag indicating that thread should pass eOther event to the controller
    CAtomicCounter          m_NeedCallController;
    /// General-use mutex for very (very!) trivial ops
    mutable CFastMutex      m_FastMutex;
};



/// Guardian for protecting pool by locking its main mutex
class CThreadPool_Guard : private CMutexGuard
{
public:
    /// Constructor
    /// @param pool
    ///   Pool to protect
    /// @param is_active
    ///   Whether the mutex should be locked in the constructor
    CThreadPool_Guard(CThreadPool_Impl* pool, bool is_active = true);

    /// Turn this guardian on
    void Guard(void);

    /// Turn this guardian off
    void Release(void);

private:
    /// Pool protected by the guardian
    CThreadPool_Impl* m_Pool;
};



/// Special task which does nothing
/// It is used in FlushThreads() to force the pool to wait until all old
/// threads finish their work before starting new ones.
///
/// @sa CThreadPool_Impl::FlushThreads()
class CThreadPool_EmptyTask : public CThreadPool_Task
{
public:
    /// Empty main method
    virtual EStatus Execute(void) { return eCompleted; }

    // In the absence of the following constructor, new compilers (as required
    // by the new C++ standard) may fill the object memory with zeros,
    // erasing flags set by CObject::operator new (see CXX-1808)
    CThreadPool_EmptyTask(void) {}
};



/// Check if status returned from CThreadPool_Task::Execute() is allowed
/// and change it to eCompleted value if it is invalid
static inline CThreadPool_Task::EStatus
s_ConvertTaskResult(CThreadPool_Task::EStatus status)
{
    _ASSERT(status == CThreadPool_Task::eCompleted
            ||  status == CThreadPool_Task::eFailed
            ||  status == CThreadPool_Task::eCanceled);

    if (status != CThreadPool_Task::eCompleted
        &&  status != CThreadPool_Task::eFailed
        &&  status != CThreadPool_Task::eCanceled)
    {
        ERR_POST_X(9, Critical
                      << "Wrong status returned from "
                         "CThreadPool_Task::Execute(): "
                      << status);
        status = CThreadPool_Task::eCompleted;
    }

    return status;
}



const CAtomicCounter::TValue kNeedCallController_Shift = 0x0FFFFFFF;


inline void
CThreadPool_ServiceThread::WakeUp(void)
{
    m_IdleTrigger.Post();
}

inline void
CThreadPool_ServiceThread::NeedCallController(void)
{
    if (m_NeedCallController.Add(1) > kNeedCallController_Shift + 1) {
        m_NeedCallController.Add(-1);
    }
    else {
        WakeUp();
    }
}
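
// How the m_NeedCallController latch works (a worked trace of the code above
// and of x_Idle() below): the counter idles at kNeedCallController_Shift.
// The first NeedCallController() call raises it to Shift+1 and wakes the
// service thread; any further call made before the service thread runs would
// push the counter past Shift+1, so it immediately undoes its increment and
// skips the extra WakeUp().  In x_Idle() the counter is decremented back
// (and restored if it would drop below the shift), so several requests that
// arrive while the service thread is busy collapse into a single wake-up.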



inline void
CThreadPool_ThreadImpl::WakeUp(void)
{
    m_IdleTrigger.Post();
}



inline CMutex&
CThreadPool_Impl::GetMainPoolMutex(void)
{
    return m_MainPoolMutex;
}



CThreadPool_Guard::CThreadPool_Guard(CThreadPool_Impl* pool, bool is_active)
    : CMutexGuard(eEmptyGuard),
      m_Pool(pool)
{
    _ASSERT(pool);

    if (is_active)
        Guard();
}

void
CThreadPool_Guard::Guard(void)
{
    CMutexGuard::Guard(m_Pool->GetMainPoolMutex());
}

void
CThreadPool_Guard::Release(void)
{
    CMutexGuard::Release();
}



inline void
CThreadPool_Impl::sx_SetTaskStatus(CThreadPool_Task*          task,
                                   CThreadPool_Task::EStatus  status)
{
    task->x_SetStatus(status);
}

inline void
CThreadPool_Impl::sx_RequestToCancel(CThreadPool_Task* task)
{
    task->x_RequestToCancel();
}

inline CThreadPool*
CThreadPool_Impl::GetPoolInterface(void) const
{
    return m_Interface;
}

inline void
CThreadPool_Impl::SetInterfaceStarted(void)
{
    m_ServiceThread->Run(CThread::fRunDetached);
}

inline bool
CThreadPool_Impl::IsAborted(void) const
{
    return m_Aborted;
}

inline bool
CThreadPool_Impl::IsSuspended(void) const
{
    return m_Suspended.load(memory_order_acquire);
}

inline unsigned int
CThreadPool_Impl::GetThreadsCount(void) const
{
    return (unsigned int)m_ThreadsCount.Get();
}

inline unsigned int
CThreadPool_Impl::GetQueuedTasksCount(void) const
{
    return (unsigned int)m_Queue.GetSize();
}

inline unsigned int
CThreadPool_Impl::GetExecutingTasksCount(void) const
{
    return (unsigned int)m_ExecutingTasks.Get();
}

inline CTimeSpan
CThreadPool_Impl::GetSafeSleepTime(void) const
{
    // m_Controller variable can be uninitialized only when the ThreadPool
    // is already aborted
    CThreadPool_Controller* controller = m_Controller.GetNCPointerOrNull();
    if (controller  &&  ! m_Aborted) {
        return controller->GetSafeSleepTime();
    }
    else {
        return CTimeSpan(0, 0);
    }
}

inline void
CThreadPool_Impl::CallController(CThreadPool_Controller::EEvent event)
{
    CThreadPool_Controller* controller = m_Controller.GetNCPointerOrNull();
    if (controller  &&  ! m_Aborted  &&
        (! IsSuspended()  ||  event == CThreadPool_Controller::eSuspend))
    {
        controller->HandleEvent(event);
    }
}

inline void
CThreadPool_Impl::CallControllerOther(void)
{
    CThreadPool_ServiceThread* thread = m_ServiceThread;
    if (thread) {
        thread->NeedCallController();
    }
}

inline void
CThreadPool_Impl::TaskStarting(void)
{
    m_ExecutingTasks.Add(1);
    // In the current implementation the controller's operation doesn't depend
    // on this action, so we skip the call to save mutex locks for performance
    //CallControllerOther();
}

inline void
CThreadPool_Impl::TaskFinished(void)
{
    m_ExecutingTasks.Add(-1);
    m_TotalTasks.Add(-1);
    if ( !m_IsQueueAllowed ) {
        m_RoomWait.Post();
    }
    CallControllerOther();
}

inline void
CThreadPool_Impl::ThreadStateChanged(void)
{
    if (m_Aborted) {

        // This lock is actually to protect access to the threads containers.
        // It was decided that this lock must not be inside x_HasNoThreads()
        // but outside of it.
        bool    has_no_threads = false;
        {{
            CThreadPool_Guard guard(this);
            has_no_threads = x_HasNoThreads();
        }}

        if (has_no_threads) {
            m_AbortWait.Post();
        }
    }
    else if (IsSuspended()) {
        if (((m_SuspendFlags & CThreadPool::fFlushThreads)
                 &&  GetThreadsCount() == 0)
            ||  (! (m_SuspendFlags & CThreadPool::fFlushThreads)
                 &&  m_WorkingThreads.size() == 0))
        {
            m_ServiceThread->WakeUp();
        }
    }
}

inline void
CThreadPool_Impl::ThreadStopped(CThreadPool_ThreadImpl* thread)
{
    CThreadPool_Guard guard(this);

    m_ThreadsCount.Add(-1);

    m_IdleThreads.erase(thread);
    m_WorkingThreads.erase(thread);

    CallControllerOther();

    ThreadStateChanged();
}

inline CRef<CThreadPool_Task>
CThreadPool_Impl::TryGetNextTask(void)
{
    if ( !IsSuspended() ) {
        TQueue::TAccessGuard guard(m_Queue);

        if (m_Queue.GetSize() != 0) {
            return m_Queue.Pop();
        }
    }

    return CRef<CThreadPool_Task>();
}


inline CThreadPool_Impl::SExclusiveTaskInfo
CThreadPool_Impl::TryGetExclusiveTask(void)
{
    TExclusiveQueue::TAccessGuard guard(m_ExclusiveQueue);

    if (m_ExclusiveQueue.GetSize() == 0
        || ((guard.Begin()->flags & CThreadPool::fExecuteQueuedTasks) != 0
            &&  (m_TotalTasks.Get() != 0))) {
        return SExclusiveTaskInfo(0, CRef<CThreadPool_Task>());
    }

    CThreadPool_Impl::SExclusiveTaskInfo info = m_ExclusiveQueue.Pop();

    if (m_FlushRequested) {
        info.flags |= CThreadPool::fFlushThreads;
        m_FlushRequested = false;
    }
    return info;
}
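
// Reading of the check above: an exclusive task queued with
// CThreadPool::fExecuteQueuedTasks is held back until m_TotalTasks drops to
// zero, i.e. until every regular task accepted so far has been executed.
// Without that flag the exclusive task at the head of the queue is picked up
// right away and the pool is suspended without draining the regular queue.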


inline bool
CThreadPool_Impl::CanDoExclusiveTask(void) const
{
    if ((m_SuspendFlags & CThreadPool::fFlushThreads)
        &&  GetThreadsCount() != 0)
    {
        return false;
    }

    return m_WorkingThreads.size() == 0;
}

inline void
CThreadPool_Impl::RequestSuspend(TExclusiveFlags flags)
{
    m_SuspendFlags = flags;
    m_Suspended.store(true, memory_order_release);
    if (flags & CThreadPool::fCancelQueuedTasks) {
        x_CancelQueuedTasks();
    }
    if (flags & CThreadPool::fCancelExecutingTasks) {
        x_CancelExecutingTasks();
    }

    if (flags & CThreadPool::fFlushThreads) {
        FinishThreads((unsigned int)m_IdleThreads.size());
    }

    CallController(CThreadPool_Controller::eSuspend);
}

inline void
CThreadPool_Impl::ResumeWork(void)
{
    m_Suspended.store(false, memory_order_release);

    CallController(CThreadPool_Controller::eResume);

    ITERATE(TThreadsList, it, m_IdleThreads) {
        (*it)->WakeUp();
    }
}



inline void
CThreadPool_Controller::x_AttachToPool(CThreadPool_Impl* pool)
{
    if (m_Pool != NULL) {
        NCBI_THROW(CThreadPoolException, eControllerBusy,
                   "Cannot attach Controller to several ThreadPools.");
    }

    m_Pool = pool;
}

inline void
CThreadPool_Controller::x_DetachFromPool(void)
{
    m_Pool = NULL;
}



CThreadPool_Task::CThreadPool_Task(unsigned int priority)
{
    x_Init(priority);
}

CThreadPool_Task::CThreadPool_Task(const CThreadPool_Task& other)
{
    x_Init(other.m_Priority);
}

void
CThreadPool_Task::x_Init(unsigned int priority)
{
    m_Pool = NULL;
    m_Priority = priority;
    // Thread Checker complains here but this code is called only from
    // constructor, so no one else can reference this task yet.
    m_Status = eIdle;
    m_CancelRequested = false;
}

CThreadPool_Task::~CThreadPool_Task(void)
{}

CThreadPool_Task&
CThreadPool_Task::operator= (const CThreadPool_Task& other)
{
    if (m_IsBusy.Get() != 0) {
        NCBI_THROW(CThreadPoolException, eTaskBusy,
                   "Cannot change task when it is already added "
                   "to ThreadPool");
    }

    CObject::operator= (other);
    // There can be a race with CThreadPool_Impl::AddTask():
    // if the task has already been added to the queue and its priority is
    // changed afterwards, the queue can crash later
    m_Priority = other.m_Priority;
    return *this;
}

void
CThreadPool_Task::OnStatusChange(EStatus /* old */)
{}

void
CThreadPool_Task::OnCancelRequested(void)
{}

inline void
CThreadPool_Task::x_SetOwner(CThreadPool_Impl* pool)
{
    if (m_IsBusy.Add(1) != 1) {
        m_IsBusy.Add(-1);
        NCBI_THROW(CThreadPoolException, eTaskBusy,
                   "Cannot add task in ThreadPool several times");
    }

    // Thread Checker complains that this races with task canceling and
    // resetting m_Pool below. But it's a thread pool usage error if someone
    // calls AddTask() and CancelTask() concurrently. With a proper workflow
    // CancelTask() shouldn't be called until AddTask() has returned.
    m_Pool = pool;
}

inline void
CThreadPool_Task::x_ResetOwner(void)
{
    m_Pool = NULL;
    m_IsBusy.Add(-1);
}

void
CThreadPool_Task::x_SetStatus(EStatus new_status)
{
    EStatus old_status = m_Status;
    if (old_status != new_status  &&  old_status != eCanceled) {
        // Thread Checker complains here, but all status transitions are
        // properly guarded with different mutexes and they cannot mix with
        // each other.
        m_Status = new_status;
        OnStatusChange(old_status);
    }

    if (IsFinished()) {
        // Thread Checker complains here. See comment in x_SetOwner above for
        // details.
        m_Pool = NULL;
    }
}

inline void
CThreadPool_Task::x_RequestToCancel(void)
{
    m_CancelRequested = true;

    OnCancelRequested();

    if (GetStatus() <= eQueued) {
        // This can race with calling task's Execute() method but it's okay.
        // For details see comment in CThreadPool_ThreadImpl::Main().
        x_SetStatus(eCanceled);
    }
}

void
CThreadPool_Task::RequestToCancel(void)
{
    // Protect from possible resetting of the pool variable during execution
    CThreadPool_Impl* pool = m_Pool;
    if (IsFinished()) {
        return;
    }
    else if (!pool) {
        x_RequestToCancel();
    }
    else {
        pool->CancelTask(this);
    }
}
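
// Cancellation is cooperative: RequestToCancel() only sets the flag (and
// flips still-queued tasks to eCanceled); a task that is already executing
// has to poll the flag itself.  A sketch of such a task (CMyTask and its
// helpers are hypothetical, not part of this file):
//
//     CThreadPool_Task::EStatus CMyTask::Execute(void)
//     {
//         while (x_HasMoreWork()) {
//             if (IsCancelRequested())
//                 return eCanceled;
//             x_DoNextChunk();
//         }
//         return eCompleted;
//     }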

CThreadPool*
CThreadPool_Task::GetPool(void) const
{
    // GCC thread sanitizer complains about GetPool() when a task is cancelled
    // in a thread pool. This is however a false positive: CancelTask() may
    // happen only while the thread pool exists, so m_Pool is set properly.

    // Protect from possible resetting of the pool variable during execution
    CThreadPool_Impl* pool_impl = m_Pool;
    return pool_impl? pool_impl->GetPoolInterface(): NULL;
}



CThreadPool_ServiceThread::CThreadPool_ServiceThread(CThreadPool_Impl* pool)
    : m_Pool(pool),
      m_IdleTrigger(0, kMax_Int),
      m_Finishing(false),
      m_Finished(false)
{
    _ASSERT(pool);

    m_NeedCallController.Set(kNeedCallController_Shift);
}

CThreadPool_ServiceThread::~CThreadPool_ServiceThread(void)
{}

inline bool
CThreadPool_ServiceThread::IsFinished(void)
{
    return m_Finished;
}

inline void
CThreadPool_ServiceThread::x_Idle(void)
{
    if (m_NeedCallController.Add(-1) < kNeedCallController_Shift) {
        m_NeedCallController.Add(1);
    }
    m_Pool->CallController(CThreadPool_Controller::eOther);

    CTimeSpan timeout = m_Pool->GetSafeSleepTime();
    // TODO: it would be better to use CTimeout for all timeouts
    _ASSERT(timeout.GetSign() != eNegative);
    m_IdleTrigger.TryWait((unsigned int)timeout.GetCompleteSeconds(),
                          (unsigned int)timeout.GetNanoSecondsAfterSecond());
}

inline void
CThreadPool_ServiceThread::RequestToFinish(void)
{
    m_Finishing = true;
    WakeUp();

    CRef<CThreadPool_Task> task;
    {{
        CFastMutexGuard fast_guard(m_FastMutex);
        task = m_CurrentTask;
    }}

    if ( task.NotNull() ) {
        CThreadPool_Impl::sx_RequestToCancel(task);
    }
}

void*
CThreadPool_ServiceThread::Main(void)
{
    while (! m_Finishing) {
        CThreadPool_Impl::SExclusiveTaskInfo task_info =
            m_Pool->TryGetExclusiveTask();

        {{
            CFastMutexGuard fast_guard(m_FastMutex);
            m_CurrentTask = task_info.task;
        }}
        

        if ( m_CurrentTask.IsNull() ) {
            x_Idle();
            continue;
        }

        CThreadPool_Guard guard(m_Pool);

        if (m_Finishing) {
            if (! m_CurrentTask->IsCancelRequested()) {
                CThreadPool_Impl::sx_RequestToCancel(m_CurrentTask);
            }
            CThreadPool_Impl::sx_SetTaskStatus(m_CurrentTask,
                                               CThreadPool_Task::eCanceled);
            break;
        }


        // Signal to suspend the threads for the execution of exclusive task
        m_Pool->RequestSuspend(task_info.flags
                               & ~CThreadPool::fExecuteQueuedTasks);

        // Wait until pool is ready for execution of the exclusive task
        while (! m_Pool->IsAborted()  &&  ! m_Pool->CanDoExclusiveTask()) {
            guard.Release();
            m_IdleTrigger.Wait();
            guard.Guard();
        }

        if (m_Finishing) {
            if (!m_CurrentTask->IsCancelRequested()) {
                CThreadPool_Impl::sx_RequestToCancel(m_CurrentTask);
            }
            CThreadPool_Impl::sx_SetTaskStatus(m_CurrentTask,
                                               CThreadPool_Task::eCanceled);
            break;
        }

        guard.Release();

        CThreadPool_Impl::sx_SetTaskStatus(m_CurrentTask,
                                           CThreadPool_Task::eExecuting);
        try {
            CThreadPool_Task::EStatus status =
                s_ConvertTaskResult(m_CurrentTask->Execute());
            CThreadPool_Impl::sx_SetTaskStatus(m_CurrentTask, status);
        }
        NCBI_CATCH_ALL_X(11, "Exception from exclusive task in ThreadPool");

        guard.Guard();
        m_Pool->ResumeWork();
    }

    m_Finished = true;
    m_Pool->ThreadStateChanged();

    return NULL;
}



inline CThreadPool_ThreadImpl*
CThreadPool_ThreadImpl::s_GetImplPointer(CThreadPool_Thread* thread)
{
    return thread->m_Impl;
}

inline CThreadPool_Thread*
CThreadPool_ThreadImpl::s_CreateThread(CThreadPool* pool)
{
    return new CThreadPool_Thread(pool);
}

inline
CThreadPool_ThreadImpl::CThreadPool_ThreadImpl
(
    CThreadPool_Thread*  thread_intf,
    CThreadPool_Impl*    pool
)
  : m_Interface(thread_intf),
    m_Pool(pool),
    m_Finishing(false),
    m_CancelRequested(false),
    m_IsIdle(true),
    m_IdleTrigger(0, kMax_Int)
{}

inline
CThreadPool_ThreadImpl::~CThreadPool_ThreadImpl(void)
{}

inline CThreadPool*
CThreadPool_ThreadImpl::GetPool(void) const
{
    return m_Pool->GetPoolInterface();
}

inline bool
CThreadPool_ThreadImpl::IsFinishing(void) const
{
    return m_Finishing;
}

inline CRef<CThreadPool_Task>
CThreadPool_ThreadImpl::GetCurrentTask(void) const
{
    CFastMutexGuard fast_guard(m_FastMutex);
    return m_CurrentTask;
}

inline bool
CThreadPool_ThreadImpl::x_SetIdleState(bool is_idle)
{
    if (m_IsIdle == is_idle)
        return true;

    if ( !m_Pool->SetThreadIdle(this, is_idle) )
        return false;

    m_IsIdle = is_idle;
    return true;
}

inline void
CThreadPool_ThreadImpl::x_TaskFinished(CThreadPool_Task::EStatus status)
{
    if (m_CurrentTask->GetStatus() == CThreadPool_Task::eExecuting) {
        CThreadPool_Impl::sx_SetTaskStatus(m_CurrentTask, status);
    }

    {{
        CFastMutexGuard fast_guard(m_FastMutex);
        m_CurrentTask.Reset();
    }}
    m_Pool->TaskFinished();
}

inline void
CThreadPool_ThreadImpl::x_Idle(void)
{
    if ( x_SetIdleState(true) )
        m_IdleTrigger.Wait();
}

inline void
CThreadPool_ThreadImpl::RequestToFinish(void)
{
    m_Finishing = true;
    WakeUp();
}

inline void
CThreadPool_ThreadImpl::CancelCurrentTask(void)
{
    // Avoid resetting of the pointer during execution
    CRef<CThreadPool_Task> task;
    {{
        CFastMutexGuard fast_guard(m_FastMutex);
        task = m_CurrentTask;
    }}

    if (task.NotNull()) {
        CThreadPool_Impl::sx_RequestToCancel(task);
    }
    else {
        m_CancelRequested = true;
    }
}

inline void
CThreadPool_ThreadImpl::Main(void)
{
    m_Interface->Initialize();

    while (!m_Finishing) {
        // We have to heed calls to CancelCurrentTask() only after this point,
        // so we reset the value of m_CancelRequested here without any mutexes.
        // If CancelCurrentTask() is called earlier or this assignment races
        // with assignment in CancelCurrentTask() then caller of
        // CancelCurrentTask() will make sure that TryGetNextTask() returns
        // NULL.
        m_CancelRequested = false;

        {{
            CRef<CThreadPool_Task> task = m_Pool->TryGetNextTask();
            CFastMutexGuard fast_guard(m_FastMutex);
            m_CurrentTask = task;
        }}


        if (m_CurrentTask.IsNull()) {
            x_Idle();
        }
        else {
            if (m_CurrentTask->IsCancelRequested()  ||  m_CancelRequested) {
                // A race can appear if the task is canceled while it is
                // being queued or while it is being unqueued
                if (! m_CurrentTask->IsCancelRequested()) {
                    CThreadPool_Impl::sx_RequestToCancel(m_CurrentTask);
                }
                CThreadPool_Impl::sx_SetTaskStatus(m_CurrentTask,
                                                 CThreadPool_Task::eCanceled);
                CFastMutexGuard fast_guard(m_FastMutex);
                m_CurrentTask = NULL;
                continue;
            }

            x_SetIdleState(false);
            m_Pool->TaskStarting();

            // This can race with canceling of the task. This can result in
            // the task's Execute() method being called with eCanceled already
            // set, or in the cancellation being totally ignored in the task's
            // status (m_CancelRequested will still be set). Both outcomes are
            // a matter of timing, and cancellation should be checked in the
            // task's Execute() method anyway. The worst outcome here is that
            // the task can be marked eCanceled even though it was completely
            // and successfully executed, which is not too bad.
            CThreadPool_Impl::sx_SetTaskStatus(m_CurrentTask,
                                               CThreadPool_Task::eExecuting);

            try {
                CThreadPool_Task::EStatus status =
                                s_ConvertTaskResult(m_CurrentTask->Execute());
                x_TaskFinished(status);
            }
            catch (exception& e) {
                ERR_POST_X(7, "Exception from task in ThreadPool: " << e);
                if (m_CurrentTask.NotEmpty()) {
                    x_TaskFinished(CThreadPool_Task::eFailed);
                }
            }
            catch (...) {
                ERR_POST_X(7, "Non-standard exception from task in ThreadPool");
                if (m_CurrentTask.NotEmpty()) {
                    x_TaskFinished(CThreadPool_Task::eFailed);
                }
                throw;
            }
        }
    }
}

inline void
CThreadPool_ThreadImpl::OnExit(void)
{
    try {
        m_Interface->Finalize();
    } STD_CATCH_ALL_X(8, "Finalize")

    m_Pool->ThreadStopped(this);
}



inline CThreadPool_Impl*
CThreadPool_Impl::s_GetImplPointer(CThreadPool* pool)
{
    return pool->m_Impl;
}

inline unsigned int
CThreadPool_Impl::x_GetQueueSize(unsigned int queue_size)
{
    if (queue_size == 0) {
        // 10 is just in case; in fact when queue_size == 0 the pool always
        // checks for idle threads, so tasks will never pile up in the queue
        queue_size = 10;
        m_IsQueueAllowed = false;
    }
    else {
        m_IsQueueAllowed = true;
    }

    return queue_size;
}
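
// In other words, queue_size == 0 switches the pool into a "no queuing" mode.
// The internal CSyncQueue still exists (with the small safety size set
// above), but AddTask() below first waits, via x_CanAddImmediateTask(), until
// the number of tasks in flight is below the controller's maximum number of
// threads and launches an extra thread when needed, so tasks essentially go
// straight to a thread instead of piling up in the queue.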

inline
CThreadPool_Impl::CThreadPool_Impl(CThreadPool*      pool_intf,
                                   unsigned int      queue_size,
                                   unsigned int      max_threads,
                                   unsigned int      min_threads,
                                   CThread::TRunMode threads_mode)
    : m_Queue(x_GetQueueSize(queue_size)),
      m_RoomWait(0, kMax_Int),
      m_AbortWait(0, kMax_Int)
{
    x_Init(pool_intf,
           new CThreadPool_Controller_PID(max_threads, min_threads),
           threads_mode);
}

inline
CThreadPool_Impl::CThreadPool_Impl(CThreadPool*            pool_intf,
                                   unsigned int            queue_size,
                                   CThreadPool_Controller* controller,
                                   CThread::TRunMode       threads_mode)
    : m_Queue(x_GetQueueSize(queue_size)),
      m_RoomWait(0, kMax_Int),
      m_AbortWait(0, kMax_Int)
{
    x_Init(pool_intf, controller, threads_mode);
}

void
CThreadPool_Impl::x_Init(CThreadPool*             pool_intf,
                         CThreadPool_Controller*  controller,
                         CThread::TRunMode        threads_mode)
{
    m_Interface = pool_intf;
    m_SelfRef = this;
    m_DestroyTimeout = CTimeSpan(10, 0);
    m_ThreadsCount.Set(0);
    m_ExecutingTasks.Set(0);
    m_TotalTasks.Set(0);
    m_Aborted = false;
    m_Suspended.store(false, memory_order_relaxed);
    m_FlushRequested = false;
    m_ThreadsMode = (threads_mode | CThread::fRunDetached)
                     & ~CThread::fRunAllowST;

    controller->x_AttachToPool(this);
    m_Controller = controller;

    m_ServiceThread = new CThreadPool_ServiceThread(this);
}

CThreadPool_Impl::~CThreadPool_Impl(void)
{}

inline void
CThreadPool_Impl::DestroyReference(void)
{
    // Abort even if m_Aborted == true because threads can still be running
    // and we have to wait for their termination
    Abort(&m_DestroyTimeout);

    m_Interface = NULL;
    {{
        CThreadPool_Guard guard(this);
        m_ServiceThread = NULL;
    }}
    m_SelfRef = NULL;
}

inline void
CThreadPool_Impl::SetDestroyTimeout(const CTimeSpan& timeout)
{
    m_DestroyTimeout = timeout;
}

inline const CTimeSpan&
CThreadPool_Impl::GetDestroyTimeout(void) const
{
    return m_DestroyTimeout;
}

void
CThreadPool_Impl::LaunchThreads(unsigned int count)
{
    if (count == 0)
        return;

    CThreadPool_Guard guard(this);

    for (unsigned int i = 0; i < count; ++i) {
        CRef<CThreadPool_Thread> thread(m_Interface->CreateThread());
        m_IdleThreads.insert(
                        CThreadPool_ThreadImpl::s_GetImplPointer(thread));
        thread->Run(m_ThreadsMode);
    }

    m_ThreadsCount.Add(count);
    CallControllerOther();
}

void
CThreadPool_Impl::FinishThreads(unsigned int count)
{
    if (count == 0)
        return;

    CThreadPool_Guard guard(this);

    // The cast is theoretically extraneous, but Sun's WorkShop
    // compiler otherwise calls the wrong versions of begin() and
    // end() and refuses to convert the resulting iterators.
    REVERSE_ITERATE(TThreadsList, it,
                    static_cast<const TThreadsList&>(m_IdleThreads))
    {
        // Maybe in the case of several quick consecutive calls we should
        // honor the wish to finish several more threads (i.e. skip threads
        // that are already finishing):
        //if ((*it)->IsFinishing())
        //    continue;

        (*it)->RequestToFinish();
        --count;
        if (count == 0)
            break;
    }

    REVERSE_ITERATE(TThreadsList, it,
                    static_cast<const TThreadsList&>(m_WorkingThreads))
    {
        if (count == 0)
            break;

        (*it)->RequestToFinish();
        --count;
    }
}


bool
CThreadPool_Impl::SetThreadIdle(CThreadPool_ThreadImpl* thread, bool is_idle)
{
    CThreadPool_Guard guard(this);

    if (is_idle  &&  !IsSuspended()  &&  m_Queue.GetSize() != 0) {
        thread->WakeUp();
        return false;
    }

    TThreadsList* to_del;
    TThreadsList* to_ins;
    if (is_idle) {
        to_del = &m_WorkingThreads;
        to_ins = &m_IdleThreads;
    }
    else {
        to_del = &m_IdleThreads;
        to_ins = &m_WorkingThreads;
    }

    TThreadsList::iterator it = to_del->find(thread);
    if (it != to_del->end()) {
        to_del->erase(it);
    }
    to_ins->insert(thread);

    if (is_idle  &&  IsSuspended()
        &&  (m_SuspendFlags & CThreadPool::fFlushThreads))
    {
        thread->RequestToFinish();
    }

    ThreadStateChanged();
    return true;
}

inline bool
CThreadPool_Impl::x_NoNewTaskAllowed(void) const
{
    return
        m_Aborted  ||
        (IsSuspended()  &&  (m_SuspendFlags & CThreadPool::fDoNotAllowNewTasks));
}

bool
CThreadPool_Impl::x_CanAddImmediateTask(void) const
{
    if ( x_NoNewTaskAllowed() ) {
        // A very special kludge -- to allow immediately breaking the wait
        // loop when adding new tasks to the pool has been explicitly
        // prohibited (including if Abort() was called)
        return true;
    }

    return
        !IsSuspended()  &&
        (unsigned int) m_TotalTasks.Get() < m_Controller->GetMaxThreads();
}

bool
CThreadPool_Impl::x_HasNoThreads(void) const
{
    CThreadPool_ServiceThread* thread = m_ServiceThread.GetNCPointerOrNull();
    return m_IdleThreads.size() + m_WorkingThreads.size() == 0
           &&  (!thread  ||  thread->IsFinished());
}

bool
CThreadPool_Impl::x_WaitForPredicate(TWaitPredicate      wait_func,
                                     CThreadPool_Guard*  pool_guard,
                                     CSemaphore*         wait_sema,
                                     const CTimeSpan*    timeout,
                                     const CStopWatch*   timer)
{
    bool done = (this->*wait_func)();
    if (done) {
        wait_sema->TryWait();
        return true;
    }

    while ( !done ) {
        pool_guard->Release();

        if (timeout) {
            CTimeSpan next_tm = CTimeSpan(timeout->GetAsDouble() - timer->Elapsed());
            if (next_tm.GetSign() == eNegative) {
                return false;
            }
            if (! wait_sema->TryWait(CTimeout(next_tm))) {
                return false;
            }
        }
        else {
            wait_sema->Wait();
        }

        pool_guard->Guard();
        done = (this->*wait_func)();
    }

    return true;
}

/// Throw an exception with standard message when AddTask() is called
/// but ThreadPool is aborted or do not allow new tasks
NCBI_NORETURN
static inline void
ThrowAddProhibited(void)
{
    NCBI_THROW(CThreadPoolException, eProhibited,
               "Adding of new tasks is prohibited");
}

inline void
CThreadPool_Impl::AddTask(CThreadPool_Task* task, const CTimeSpan* timeout)
{
    _ASSERT(task);

    // To be sure that if a bare "new" expression was passed as the argument
    // the task will still be referenced even if an exception happens in this
    // method
    CRef<CThreadPool_Task> task_ref(task);

    if ( x_NoNewTaskAllowed() ) {
        ThrowAddProhibited();
    }

    CThreadPool_Guard guard(this, false);
    unique_ptr<CTimeSpan> adjusted_timeout;

    if (!m_IsQueueAllowed) {
        guard.Guard();

        CStopWatch timer(CStopWatch::eStart);
        if (! x_WaitForPredicate(&CThreadPool_Impl::x_CanAddImmediateTask,
                                 &guard, &m_RoomWait, timeout, &timer))
        {
            NCBI_THROW(CSyncQueueException, eNoRoom,
                       "Cannot add task - all threads are busy");
        }

        if ( x_NoNewTaskAllowed() ) {
            ThrowAddProhibited();
        }

        if ( timeout ) {
            adjusted_timeout.reset(new CTimeSpan
                                   (timeout->GetAsDouble() - timer.Elapsed()));
            timeout = adjusted_timeout.get();
        }
    }

    task->x_SetOwner(this);
    task->x_SetStatus(CThreadPool_Task::eQueued);
    try {
        // Pushing to the queue must be done outside the mutex so that we can
        // wait for available space.
        m_Queue.Push(Ref(task), timeout);
    }
    catch (...) {
        task->x_SetStatus(CThreadPool_Task::eIdle);
        task->x_ResetOwner();
        throw;
    }

    if (m_IsQueueAllowed) {
        guard.Guard();
    }

    // Check if someone aborted the pool or suspended it with cancellation of
    // queued tasks after we added this task to the queue but before we were
    // able to acquire the mutex
    CThreadPool::TExclusiveFlags check_flags
        = CThreadPool::fDoNotAllowNewTasks | CThreadPool::fCancelQueuedTasks;
    if (m_Aborted  ||  (IsSuspended()
                        &&  (m_SuspendFlags & check_flags)  == check_flags))
    {
        if (m_Queue.GetSize() != 0) {
            x_CancelQueuedTasks();
        }
        return;
    }

    unsigned int cnt_req = (unsigned int) m_TotalTasks.Add(1);

    if (!m_IsQueueAllowed  &&  cnt_req > GetThreadsCount()) {
        LaunchThreads(cnt_req - GetThreadsCount());
    }

    if (! IsSuspended()) {
        int count = GetQueuedTasksCount();
        ITERATE(TThreadsList, it, m_IdleThreads) {
            if (! (*it)->IsFinishing()) {
                (*it)->WakeUp();
                --count;
                if (count == 0)
                    break;
            }
        }
    }

    CallControllerOther();
}
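
// A sketch of how the two AddTask() failure modes above surface to a caller
// (pool and task classes as in the earlier sketches; the timeout value is
// arbitrary):
//
//     try {
//         CTimeSpan timeout(5, 0);          // wait up to 5 seconds
//         pool.AddTask(new CMyTask(), &timeout);
//     }
//     catch (CSyncQueueException&) {
//         // no room: the queue (or, with queue_size == 0, a free thread)
//         // did not become available within the timeout
//     }
//     catch (CThreadPoolException&) {
//         // eProhibited: the pool is aborted or new tasks are not allowed
//     }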

inline void
CThreadPool_Impl::x_RemoveTaskFromQueue(const CThreadPool_Task* task)
{
    TQueue::TAccessGuard q_guard(m_Queue);

    TQueue::TAccessGuard::TIterator it = q_guard.Begin();
    while (it != q_guard.End()  &&  *it != task) {
        ++it;
    }

    if (it != q_guard.End()) {
        q_guard.Erase(it);
    }
}

void
CThreadPool_Impl::RequestExclusiveExecution(CThreadPool_Task*  task,
                                            TExclusiveFlags    flags)
{
    _ASSERT(task);

    // To be sure that if a bare "new" expression was passed as the argument
    // the task will still be referenced even if an exception happens in this
    // method
    CRef<CThreadPool_Task> task_ref(task);

    if (m_Aborted) {
        NCBI_THROW(CThreadPoolException, eProhibited,
                   "Cannot add exclusive task when ThreadPool is aborted");
    }

    task->x_SetOwner(this);
    task->x_SetStatus(CThreadPool_Task::eQueued);
    m_ExclusiveQueue.Push(SExclusiveTaskInfo(flags, Ref(task)));

    CThreadPool_ServiceThread* thread = m_ServiceThread;
    if (thread) {
        thread->WakeUp();
    }
}

void
CThreadPool_Impl::CancelTask(CThreadPool_Task* task)
{
    _ASSERT(task);

    if (task->IsFinished()) {
        return;
    }
    // Some race can happen here if the task is being queued now
    if (task->GetStatus() == CThreadPool_Task::eIdle) {
        task->x_RequestToCancel();
        return;
    }

    CThreadPool* task_pool = task->GetPool();
    if (task_pool != m_Interface) {
        if (!task_pool) {
            // Task has just finished - there is nothing we can do
            return;
        }

        NCBI_THROW(CThreadPoolException, eInvalid,
                   "Cannot cancel task execution "
                   "if it is inserted in another ThreadPool");
    }

    task->x_RequestToCancel();
    x_RemoveTaskFromQueue(task);

    CallControllerOther();
}

inline void
CThreadPool_Impl::CancelTasks(TExclusiveFlags tasks_group)
{
    _ASSERT( (tasks_group & (CThreadPool::fCancelExecutingTasks
                             + CThreadPool::fCancelQueuedTasks))
                  == tasks_group
             &&  tasks_group != 0);

    if (tasks_group & CThreadPool::fCancelQueuedTasks) {
        x_CancelQueuedTasks();
    }
    if (tasks_group & CThreadPool::fCancelExecutingTasks) {
        x_CancelExecutingTasks();
    }

    CallControllerOther();
}

void
CThreadPool_Impl::x_CancelExecutingTasks(void)
{
    CThreadPool_Guard guard(this);

    ITERATE(TThreadsList, it, m_WorkingThreads) {
        (*it)->CancelCurrentTask();
    }

    // CThreadPool_ThreadImpl::Main() does not act under the guard, so we
    // cannot be sure that a thread doesn't already have a task to execute
    // before it marks itself as working
    ITERATE(TThreadsList, it, m_IdleThreads) {
        (*it)->CancelCurrentTask();
    }
}

void
CThreadPool_Impl::x_CancelQueuedTasks(void)
{
    TQueue::TAccessGuard q_guard(m_Queue);

    for (TQueue::TAccessGuard::TIterator it = q_guard.Begin();
                                         it != q_guard.End(); ++it)
    {
        it->GetNCPointer()->x_RequestToCancel();
    }

    m_Queue.Clear();
}

inline void
CThreadPool_Impl::FlushThreads(CThreadPool::EFlushType flush_type)
{
    CThreadPool_Guard guard(this);

    if (m_Aborted) {
        NCBI_THROW(CThreadPoolException, eProhibited,
                   "Cannot flush threads when ThreadPool aborted");
    }

    if (flush_type == CThreadPool::eStartImmediately
        ||  (flush_type == CThreadPool::eWaitToFinish  &&  IsSuspended()))
    {
        FinishThreads(GetThreadsCount());
    }
    else if (flush_type == CThreadPool::eWaitToFinish) {
        bool need_add = true;

        {{
            // To avoid races with TryGetExclusiveTask() we need to acquire
            // the guard here
            TExclusiveQueue::TAccessGuard q_guard(m_ExclusiveQueue);

            if (m_ExclusiveQueue.GetSize() != 0) {
                m_FlushRequested = true;
                need_add = false;
            }
        }}

        if (need_add) {
            RequestExclusiveExecution(new CThreadPool_EmptyTask(),
                                      CThreadPool::fFlushThreads);
        }
    }
}
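
// Client-side view of the two flush modes handled above (pool as in the
// earlier sketches):
//
//     pool.FlushThreads(CThreadPool::eStartImmediately);
//         // every current thread is asked to finish right away (each one
//         // still completes the task it is executing)
//
//     pool.FlushThreads(CThreadPool::eWaitToFinish);
//         // an internal empty exclusive task with fFlushThreads is queued,
//         // so the thread turnover goes through the regular exclusive
//         // execution machinery instead of happening immediately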

inline void
CThreadPool_Impl::Abort(const CTimeSpan* timeout)
{
    CThreadPool_Guard guard(this);

    // The method can be called several times in a row and every time we need
    // to wait for the threads to finish their work
    m_Aborted = true;

    // Cancel queued tasks
    unsigned int n_queued_tasks = GetQueuedTasksCount();
    if ( n_queued_tasks ) {
        ERR_POST_X(14, Warning <<
                   "CThreadPool is being aborted or destroyed while still "
                   "having " << n_queued_tasks << " regular tasks "
                   "waiting to be executed; they are now canceled");
    }
    x_CancelQueuedTasks();

    // Cancel currently executing tasks
    x_CancelExecutingTasks();

    // Cancel exclusive tasks
    {{
        TExclusiveQueue::TAccessGuard q_guard(m_ExclusiveQueue);

        TExclusiveQueue::TSize n_exclusive_tasks = m_ExclusiveQueue.GetSize();
        if ( n_exclusive_tasks ) {
            ERR_POST_X(15, Warning <<
                       "CThreadPool is being aborted or destroyed while still "
                       "having " << n_exclusive_tasks << " exclusive tasks "
                       "waiting to be executed; they are now canceled");
        }

        for (TExclusiveQueue::TAccessGuard::TIterator it = q_guard.Begin();
                                                it != q_guard.End(); ++it)
        {
            it->task->x_RequestToCancel();
        }

        m_ExclusiveQueue.Clear();
    }}

    // Stop threads
    if (m_ServiceThread.NotNull()) {
        m_ServiceThread->RequestToFinish();
    }

    FinishThreads(GetThreadsCount());

    if (m_Controller.NotNull()) {
        m_Controller->x_DetachFromPool();
    }

    CStopWatch timer(CStopWatch::eStart);
    x_WaitForPredicate(&CThreadPool_Impl::x_HasNoThreads,
                       &guard, &m_AbortWait, timeout, &timer);

    if ( !CThreadPool_Impl::x_HasNoThreads() ) {
        if ( timeout )
            ERR_POST_X(16, Warning <<
                       "CThreadPool::Abort() was unable to terminate "
                       "all of its threads within the specified timeout: "
                       << timeout->AsSmartString());
        else
            ERR_POST_X(17, Critical <<
                       "CThreadPool::Abort() was not able to terminate"
                       "all of its threads despite being given an infinite "
                       "time for doing so");
    }

    m_AbortWait.Post();

    // This assignment could destroy the controller. If some threads have not
    // finished yet and call the controller at that very moment, it could crash.
    //m_Controller = NULL;
}
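
// Calling-side sketch for Abort() (comment only; `pool` is a hypothetical,
// already constructed CThreadPool): with a finite timeout Abort() gives up and
// logs a warning if threads are still alive (see ERR_POST_X(16) above); with a
// NULL timeout it waits indefinitely.  The five-second value is arbitrary.
//
//     CTimeSpan grace(5, 0);     // 5 seconds, 0 nanoseconds
//     pool.Abort(&grace);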



CThreadPool_Controller::CThreadPool_Controller(unsigned int max_threads,
                                               unsigned int min_threads)
    : m_Pool(NULL),
      m_MinThreads(min_threads),
      m_MaxThreads(max_threads),
      m_InHandleEvent(false)
{
    if (max_threads < min_threads  ||  max_threads == 0) {
        NCBI_THROW_FMT(CThreadPoolException, eInvalid,
                       "Invalid numbers of min and max number of threads:"
                       " min=" << min_threads << ", max=" << max_threads);
    }
}

CThreadPool_Controller::~CThreadPool_Controller(void)
{}

CThreadPool*
CThreadPool_Controller::GetPool(void) const
{
    // Copy the pointer locally so that it cannot change during method execution
    CThreadPool_Impl* pool = m_Pool;
    return pool? pool->GetPoolInterface(): NULL;
}

CMutex&
CThreadPool_Controller::GetMainPoolMutex(CThreadPool* pool) const
{
    CThreadPool_Impl* impl = CThreadPool_Impl::s_GetImplPointer(pool);
    if (!impl) {
        NCBI_THROW(CThreadPoolException, eInactive,
                   "Cannot do active work when not attached "
                   "to some ThreadPool");
    }
    return impl->GetMainPoolMutex();
}

void
CThreadPool_Controller::EnsureLimits(void)
{
    CThreadPool_Impl* pool = m_Pool;

    if (! pool)
        return;

    Uint4 count = pool->GetThreadsCount();
    if (count > m_MaxThreads) {
        pool->FinishThreads(count - m_MaxThreads);
    }
    if (count < m_MinThreads) {
        pool->LaunchThreads(m_MinThreads - count);
    }
}

void
CThreadPool_Controller::SetMinThreads(unsigned int min_threads)
{
    CThreadPool_Guard guard(m_Pool, false);
    if (m_Pool)
        guard.Guard();

    m_MinThreads = min_threads;

    EnsureLimits();
}

void
CThreadPool_Controller::SetMaxThreads(unsigned int max_threads)
{
    CThreadPool_Guard guard(m_Pool, false);
    if (m_Pool)
        guard.Guard();

    m_MaxThreads = max_threads;

    EnsureLimits();
}

void
CThreadPool_Controller::SetThreadsCount(unsigned int count)
{
    if (count > GetMaxThreads())
        count = GetMaxThreads();
    if (count < GetMinThreads())
        count = GetMinThreads();

    CThreadPool_Impl* pool = m_Pool;
    // Nothing to adjust if the controller is not attached to a pool
    if (! pool)
        return;

    unsigned int now_cnt = pool->GetThreadsCount();
    if (count > now_cnt) {
        pool->LaunchThreads(count - now_cnt);
    }
    else if (count < now_cnt) {
        pool->FinishThreads(now_cnt - count);
    }
}

void
CThreadPool_Controller::HandleEvent(EEvent event)
{
    CThreadPool_Impl* pool = m_Pool;
    if (! pool)
        return;

    CThreadPool_Guard guard(pool);

    if (m_InHandleEvent  ||  pool->IsAborted()  ||  pool->IsSuspended())
        return;

    m_InHandleEvent = true;

    try {
        OnEvent(event);
        m_InHandleEvent = false;
    }
    catch (...) {
        m_InHandleEvent = false;
        throw;
    }
}

CTimeSpan
CThreadPool_Controller::GetSafeSleepTime(void) const
{
    if (m_Pool) {
        return CTimeSpan(1, 0);
    }
    else {
        return CTimeSpan(0, 0);
    }
}
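
// A minimal custom-controller sketch (comment only, not part of this file).
// It assumes only what is visible above: the (max_threads, min_threads)
// constructor, HandleEvent() invoking the virtual OnEvent() hook under the
// pool mutex, SetThreadsCount() clamping to the min/max limits, and a virtual
// GetSafeSleepTime().  The class name and the scaling rule are invented for
// illustration.
//
//     class CSimpleController : public CThreadPool_Controller
//     {
//     public:
//         CSimpleController(unsigned int max_threads, unsigned int min_threads)
//             : CThreadPool_Controller(max_threads, min_threads)
//         {}
//
//     protected:
//         virtual void OnEvent(EEvent /*event*/)
//         {
//             CThreadPool* pool = GetPool();
//             if (!pool) {
//                 return;
//             }
//             if (pool->GetQueuedTasksCount() > 0) {
//                 // Tasks are waiting - ask for one more thread (clamped to max)
//                 SetThreadsCount(pool->GetThreadsCount() + 1);
//             }
//             else {
//                 // Queue is empty - shrink back to the minimum
//                 SetThreadsCount(GetMinThreads());
//             }
//         }
//
//         virtual CTimeSpan GetSafeSleepTime(void) const
//         {
//             return CTimeSpan(0, 200 * 1000 * 1000);  // re-check every 0.2 s
//         }
//     };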



CThreadPool_Thread::CThreadPool_Thread(CThreadPool* pool)
{
    _ASSERT(pool);

    m_Impl = new CThreadPool_ThreadImpl(this,
                                    CThreadPool_Impl::s_GetImplPointer(pool));
}

CThreadPool_Thread::~CThreadPool_Thread(void)
{
    delete m_Impl;
}

void
CThreadPool_Thread::Initialize(void)
{}

void
CThreadPool_Thread::Finalize(void)
{}

CThreadPool*
CThreadPool_Thread::GetPool(void) const
{
    return m_Impl->GetPool();
}

CRef<CThreadPool_Task>
CThreadPool_Thread::GetCurrentTask(void) const
{
    return m_Impl->GetCurrentTask();
}

void*
CThreadPool_Thread::Main(void)
{
    m_Impl->Main();
    return NULL;
}

void
CThreadPool_Thread::OnExit(void)
{
    m_Impl->OnExit();
}



CThreadPool::CThreadPool(unsigned int      queue_size,
                         unsigned int      max_threads,
                         unsigned int      min_threads,
                         CThread::TRunMode threads_mode)
{
    m_Impl = new CThreadPool_Impl(this, queue_size, max_threads, min_threads,
                                  threads_mode);
    m_Impl->SetInterfaceStarted();
}

CThreadPool::CThreadPool(unsigned int            queue_size,
                         CThreadPool_Controller* controller,
                         CThread::TRunMode       threads_mode)
{
    m_Impl = new CThreadPool_Impl(this, queue_size, controller, threads_mode);
    m_Impl->SetInterfaceStarted();
}
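
// A minimal end-to-end sketch (comment only).  It assumes the task interface
// declared in util/thread_pool.hpp: tasks derive from CThreadPool_Task (a
// CObject, hence held via CRef) and implement Execute(), returning an EStatus
// such as eCompleted.  The class and variable names are invented, and the
// constructor's trailing threads_mode argument is assumed to default.
//
//     class CMyTask : public CThreadPool_Task
//     {
//     public:
//         virtual EStatus Execute(void)
//         {
//             // Do the actual unit of work here
//             return eCompleted;
//         }
//     };
//
//     CThreadPool pool(100 /* queue size */, 8 /* max thr */, 2 /* min thr */);
//     pool.AddTask(new CMyTask());   // timeout argument assumed to default to NULL
//     // (see the Abort() sketch earlier in this file for shutdown)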

CThreadPool::~CThreadPool(void)
{
    m_Impl->DestroyReference();
}

CMutex&
CThreadPool::GetMainPoolMutex(void)
{
    return m_Impl->GetMainPoolMutex();
}

CThreadPool_Thread*
CThreadPool::CreateThread(void)
{
    return CThreadPool_ThreadImpl::s_CreateThread(this);
}

void
CThreadPool::AddTask(CThreadPool_Task* task, const CTimeSpan* timeout)
{
    m_Impl->AddTask(task, timeout);
}

void
CThreadPool::CancelTask(CThreadPool_Task* task)
{
    m_Impl->CancelTask(task);
}

void
CThreadPool::Abort(const CTimeSpan* timeout)
{
    m_Impl->Abort(timeout);
}

bool
CThreadPool::IsAborted(void) const
{
    return m_Impl->IsAborted();
}

void
CThreadPool::SetDestroyTimeout(const CTimeSpan& timeout)
{
    m_Impl->SetDestroyTimeout(timeout);
}

const CTimeSpan&
CThreadPool::GetDestroyTimeout(void) const
{
    return m_Impl->GetDestroyTimeout();
}

void
CThreadPool::RequestExclusiveExecution(CThreadPool_Task*  task,
                                       TExclusiveFlags    flags)
{
    m_Impl->RequestExclusiveExecution(task, flags);
}
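
// Comment-only sketch of requesting exclusive execution, reusing the
// hypothetical CMyTask and `pool` from the sketch above.  The flags are
// presumably combinable and describe what the pool should do to other tasks
// around the exclusive one; fCancelExecutingTasks and fFlushThreads are the
// values used earlier in this file.
//
//     pool.RequestExclusiveExecution(new CMyTask(),
//                                    CThreadPool::fCancelExecutingTasks
//                                    | CThreadPool::fFlushThreads);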

void
CThreadPool::CancelTasks(TExclusiveFlags tasks_group)
{
    m_Impl->CancelTasks(tasks_group);
}

void
CThreadPool::FlushThreads(EFlushType flush_type)
{
    m_Impl->FlushThreads(flush_type);
}

unsigned int
CThreadPool::GetThreadsCount(void) const
{
    return m_Impl->GetThreadsCount();
}

unsigned int
CThreadPool::GetQueuedTasksCount(void) const
{
    return m_Impl->GetQueuedTasksCount();
}

unsigned int
CThreadPool::GetExecutingTasksCount(void) const
{
    return m_Impl->GetExecutingTasksCount();
}



END_NCBI_SCOPE