/*
  threadpool.c  -  Manage pools of threads

  Copyright (C)         Lumiera.org
    2009,               Michael Ploujnikov <ploujj@gmail.com>

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
|
|
|
|
|
|
|
|
|
|
//TODO: Support library includes//
|
|
|
|
|
|
|
|
|
|
#include "include/logging.h"
|
|
|
|
|
#include "lib/safeclib.h"
|
|
|
|
|
|
|
|
|
|
//TODO: Lumiera header includes//
|
|
|
|
|
#include "backend/threadpool.h"
|
|
|
|
|
|
|
|
|
|
//TODO: internal/static forward declarations//
|
2009-11-23 22:40:31 +01:00
|
|
|
static lumiera_threadpool threadpool;
|
2009-11-23 01:55:08 +01:00
|
|
|
|
|
|
|
|
//TODO: System includes//
|
|
|
|
|
#include <pthread.h>
|
|
|
|
|
|
|
|
|
|
/**
 * @file
 * Thread pool management: maintains one pool of reusable threads per
 * thread class, from which threads are acquired and to which they are
 * released after use.
 */
|
2009-11-25 04:25:00 +01:00
|
|
|
NOBUG_DEFINE_FLAG_PARENT (threadpool, threads_dbg); /*TODO insert a suitable/better parent flag here */
|
2009-11-23 01:55:08 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
//code goes here//
|
|
|
|
|
|
|
|
|
|
/**
 * Body executed by every pooled thread.
 *
 * NOTE(review): this is currently a placeholder that spins forever
 * without doing any work or yielding — it busy-waits and burns a full
 * CPU core.  Presumably the final implementation will park on a
 * condition variable and wait for dispatched jobs; confirm before
 * this goes live.
 *
 * @param arg opaque per-thread argument (currently unused)
 * @return never returns in practice; `arg` is echoed back only to
 *         satisfy the pthread start-routine signature void* (*)(void*)
 */
void*
pool_thread_loop (void* arg)
{
  (void) arg;

  for (;;)
    continue;   /* placeholder: no work dispatch implemented yet */

  /* unreachable, kept so all control paths return a value */
  return arg;
}
|
|
|
|
|
|
|
|
|
|
void
|
2009-11-26 03:46:03 +01:00
|
|
|
lumiera_threadpool_init(unsigned limit)
|
2009-11-23 01:55:08 +01:00
|
|
|
{
|
2009-11-23 22:40:31 +01:00
|
|
|
for (int i = 0; i < LUMIERA_THREADCLASS_COUNT; ++i)
|
|
|
|
|
{
|
2009-11-26 17:27:50 +01:00
|
|
|
llist_init(&threadpool.pool[i].list);
|
|
|
|
|
threadpool.pool[i].max_threads = limit;
|
|
|
|
|
threadpool.pool[i].working_thread_count = 0;
|
|
|
|
|
threadpool.pool[i].idle_thread_count = 0;
|
|
|
|
|
lumiera_mutex_init(&threadpool.pool[i].lock,"pool of threads", &NOBUG_FLAG(threadpool));
|
2009-11-23 22:40:31 +01:00
|
|
|
}
|
|
|
|
|
}
|
2009-11-23 01:55:08 +01:00
|
|
|
|
2009-11-25 04:25:00 +01:00
|
|
|
void
|
|
|
|
|
lumiera_threadpool_destroy(void)
|
|
|
|
|
{
|
2009-11-25 17:06:53 +01:00
|
|
|
ECHO ("destroying threadpool");
|
2009-11-25 04:25:00 +01:00
|
|
|
for (int i = 0; i < LUMIERA_THREADCLASS_COUNT; ++i)
|
|
|
|
|
{
|
2009-11-25 17:06:53 +01:00
|
|
|
ECHO ("destroying individual pool #%d", i);
|
2009-11-25 04:25:00 +01:00
|
|
|
// no locking is done at this point
|
2009-11-26 17:27:50 +01:00
|
|
|
ECHO ("number of threads in the pool=%d", llist_count(&threadpool.pool[i].list));
|
|
|
|
|
LLIST_WHILE_HEAD(&threadpool.pool[i].list, thread)
|
2009-11-25 17:06:53 +01:00
|
|
|
lumiera_thread_delete((LumieraThread)thread);
|
|
|
|
|
ECHO ("destroying the pool mutex");
|
2009-11-26 17:27:50 +01:00
|
|
|
lumiera_mutex_destroy (&threadpool.pool[i].lock, &NOBUG_FLAG (threadpool));
|
2009-11-25 17:06:53 +01:00
|
|
|
ECHO ("pool mutex destroyed");
|
2009-11-25 04:25:00 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-11-23 01:55:08 +01:00
|
|
|
LumieraThread
|
|
|
|
|
lumiera_threadpool_acquire_thread(enum lumiera_thread_class kind,
|
|
|
|
|
const char* purpose,
|
|
|
|
|
struct nobug_flag* flag)
|
|
|
|
|
{
|
2009-11-25 04:25:00 +01:00
|
|
|
LumieraThread ret;
|
|
|
|
|
|
|
|
|
|
REQUIRE (kind < LUMIERA_THREADCLASS_COUNT, "unknown pool kind specified: %d", kind);
|
2009-11-26 17:27:50 +01:00
|
|
|
if (llist_is_empty (&threadpool.pool[kind].list))
|
2009-11-23 02:08:08 +01:00
|
|
|
{
|
2009-11-25 04:25:00 +01:00
|
|
|
// TODO: fill in the reccondition argument, currently NULL
|
2009-11-26 03:46:03 +01:00
|
|
|
FIXME ("this max thread logic needs to be deeply thought about and made more efficient as well as rebust");
|
2009-11-26 17:27:50 +01:00
|
|
|
if (threadpool.pool[kind].working_thread_count
|
|
|
|
|
+ threadpool.pool[kind].idle_thread_count
|
|
|
|
|
< threadpool.pool[kind].max_threads) {
|
2009-11-26 03:46:03 +01:00
|
|
|
ret = lumiera_thread_new (kind, NULL, purpose, flag);
|
2009-11-26 17:27:50 +01:00
|
|
|
threadpool.pool[kind].working_thread_count++;
|
2009-11-26 03:46:03 +01:00
|
|
|
ENSURE (ret, "did not create a valid thread");
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
2009-11-26 17:27:50 +01:00
|
|
|
//ERROR (threadpool, "did not create a new thread because per-pool limit was reached: %d", threadpool.pool[kind].max_threads);
|
2009-11-26 03:46:03 +01:00
|
|
|
LUMIERA_DIE(ERRNO);
|
|
|
|
|
}
|
2009-11-23 02:08:08 +01:00
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
// use an existing thread, pick the first one
|
2009-11-23 22:40:31 +01:00
|
|
|
// remove it from the pool's list
|
2009-11-26 17:27:50 +01:00
|
|
|
LUMIERA_MUTEX_SECTION (threadpool, &threadpool.pool[kind].lock)
|
2009-11-25 04:25:00 +01:00
|
|
|
{
|
2009-11-26 17:27:50 +01:00
|
|
|
ret = (LumieraThread)(llist_unlink(llist_head (&threadpool.pool[kind].list)));
|
|
|
|
|
threadpool.pool[kind].working_thread_count++;
|
|
|
|
|
threadpool.pool[kind].idle_thread_count--; // cheaper than using llist_count
|
|
|
|
|
ENSURE (threadpool.pool[kind].idle_thread_count ==
|
|
|
|
|
llist_count(&threadpool.pool[kind].list),
|
2009-11-26 03:46:03 +01:00
|
|
|
"idle thread count %d is wrong, should be %d",
|
2009-11-26 17:27:50 +01:00
|
|
|
threadpool.pool[kind].idle_thread_count,
|
|
|
|
|
llist_count(&threadpool.pool[kind].list));
|
2009-11-25 04:25:00 +01:00
|
|
|
}
|
2009-11-26 00:58:15 +01:00
|
|
|
ENSURE (ret, "did not find a valid thread");
|
2009-11-23 02:08:08 +01:00
|
|
|
}
|
2009-11-25 04:25:00 +01:00
|
|
|
return ret;
|
2009-11-23 01:55:08 +01:00
|
|
|
}
|
|
|
|
|
|
2009-11-23 22:51:18 +01:00
|
|
|
/**
 * Return a thread to its class pool once its work is done.
 *
 * Inserts the thread at the head of its pool's idle list and adjusts
 * the working/idle counters, all under the pool mutex.
 *
 * @param thread the thread to park; must be non-NULL, belong to a
 *        known pool kind, and not currently be linked into any list
 */
void
lumiera_threadpool_release_thread(LumieraThread thread)
{
  REQUIRE (thread, "invalid thread given");
  REQUIRE (thread->kind < LUMIERA_THREADCLASS_COUNT, "thread belongs to an unknown pool kind: %d", thread->kind);

  LUMIERA_MUTEX_SECTION (threadpool, &threadpool.pool[thread->kind].lock)
    {
      /* a node still linked elsewhere would corrupt both lists */
      REQUIRE (llist_is_single(&thread->node), "thread already belongs to some list");
      llist_insert_head(&threadpool.pool[thread->kind].list, &thread->node);
      threadpool.pool[thread->kind].working_thread_count--;
      threadpool.pool[thread->kind].idle_thread_count++; // cheaper than using llist_count
      /* sanity check: cached idle counter must agree with the actual list length */
      ENSURE (threadpool.pool[thread->kind].idle_thread_count ==
              llist_count(&threadpool.pool[thread->kind].list),
              "idle thread count %d is wrong, should be %d",
              threadpool.pool[thread->kind].idle_thread_count,
              llist_count(&threadpool.pool[thread->kind].list));
      // REQUIRE (!llist_is_empty (&threadpool.pool[thread->kind].list), "thread pool is still empty after insertion");
    }
}
|
|
|
|
|
|
2009-11-23 01:55:08 +01:00
|
|
|
/*
|
|
|
|
|
// Local Variables:
|
|
|
|
|
// mode: C
|
|
|
|
|
// c-file-style: "gnu"
|
|
|
|
|
// indent-tabs-mode: nil
|
|
|
|
|
// End:
|
|
|
|
|
*/
|