aboutsummaryrefslogtreecommitdiff
path: root/rt
diff options
context:
space:
mode:
authorFlorian Weimer <fweimer@redhat.com>2021-06-22 09:50:27 +0200
committerFlorian Weimer <fweimer@redhat.com>2021-06-22 09:50:45 +0200
commitdaa3fc9bff55c1f8368a464ec802ab620901344e (patch)
tree14c3a5a951a84aeff9d554ab5a77c13a4ec29521 /rt
parentae830b2d9f5238e1bee9820cd4d4df7f7b13ecff (diff)
downloadglibc-daa3fc9bff55c1f8368a464ec802ab620901344e.tar.xz
glibc-daa3fc9bff55c1f8368a464ec802ab620901344e.zip
rt: Move generic implementation from sysdeps/pthread to rt
The pthread-based implementation is the generic one. Replacing the stubs makes it clear that they do not have to be adjusted for the libpthread move. Result of: git mv -f sysdeps/pthread/aio_misc.h sysdeps/generic/ git mv sysdeps/pthread/timer_routines.c sysdeps/htl/ git mv -f sysdeps/pthread/{aio,lio,timer}_*.c rt/ Followed by manual adjustment of the #include paths in sysdeps/unix/sysv/linux/wordsize-64, and a move of the version definitions formerly in sysdeps/pthread/Versions. Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Diffstat (limited to 'rt')
-rw-r--r--rt/Versions4
-rw-r--r--rt/aio_cancel.c129
-rw-r--r--rt/aio_error.c12
-rw-r--r--rt/aio_fsync.c26
-rw-r--r--rt/aio_misc.c699
-rw-r--r--rt/aio_notify.c144
-rw-r--r--rt/aio_read.c18
-rw-r--r--rt/aio_read64.c32
-rw-r--r--rt/aio_suspend.c237
-rw-r--r--rt/aio_write.c18
-rw-r--r--rt/aio_write64.c32
-rw-r--r--rt/lio_listio.c236
-rw-r--r--rt/lio_listio64.c33
-rw-r--r--rt/timer_create.c156
-rw-r--r--rt/timer_delete.c58
-rw-r--r--rt/timer_getoverr.c33
-rw-r--r--rt/timer_gettime.c64
-rw-r--r--rt/timer_settime.c120
18 files changed, 1931 insertions, 120 deletions
diff --git a/rt/Versions b/rt/Versions
index 309486be1e..26c6d1ac63 100644
--- a/rt/Versions
+++ b/rt/Versions
@@ -47,6 +47,10 @@ librt {
mq_timedsend;
mq_unlink;
}
+ GLIBC_2.4 {
+ lio_listio;
+ lio_listio64;
+ }
GLIBC_2.7 {
__mq_open_2;
}
diff --git a/rt/aio_cancel.c b/rt/aio_cancel.c
index dba1e45044..63fd88f36c 100644
--- a/rt/aio_cancel.c
+++ b/rt/aio_cancel.c
@@ -1,6 +1,7 @@
-/* Cancel requests associated with given file descriptor. Stub version.
- Copyright (C) 2001-2021 Free Software Foundation, Inc.
+/* Cancel requests associated with given file descriptor.
+ Copyright (C) 1997-2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -22,21 +23,135 @@
we want to avoid code duplication by using aliases. But gcc sees
the different parameter lists and prints a warning. We define here
a function so that aio_cancel64 has no prototype. */
+#ifndef aio_cancel
#define aio_cancel64 XXX
#include <aio.h>
/* And undo the hack. */
#undef aio_cancel64
+#endif
+#include <assert.h>
#include <errno.h>
+#include <fcntl.h>
+
+#include <aio_misc.h>
+
int
aio_cancel (int fildes, struct aiocb *aiocbp)
{
- __set_errno (ENOSYS);
- return -1;
+ struct requestlist *req = NULL;
+ int result = AIO_ALLDONE;
+
+ /* If fildes is invalid, error. */
+ if (fcntl (fildes, F_GETFL) < 0)
+ {
+ __set_errno (EBADF);
+ return -1;
+ }
+
+ /* Request the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ /* We are asked to cancel a specific AIO request. */
+ if (aiocbp != NULL)
+ {
+ /* If the AIO request is not for this descriptor it has no value
+ to look for the request block. */
+ if (aiocbp->aio_fildes != fildes)
+ {
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ __set_errno (EINVAL);
+ return -1;
+ }
+ else if (aiocbp->__error_code == EINPROGRESS)
+ {
+ struct requestlist *last = NULL;
+
+ req = __aio_find_req_fd (fildes);
+
+ if (req == NULL)
+ {
+ not_found:
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ __set_errno (EINVAL);
+ return -1;
+ }
+
+ while (req->aiocbp != (aiocb_union *) aiocbp)
+ {
+ last = req;
+ req = req->next_prio;
+ if (req == NULL)
+ goto not_found;
+ }
+
+ /* Don't remove the entry if a thread is already working on it. */
+ if (req->running == allocated)
+ {
+ result = AIO_NOTCANCELED;
+ req = NULL;
+ }
+ else
+ {
+ /* We can remove the entry. */
+ __aio_remove_request (last, req, 0);
+
+ result = AIO_CANCELED;
+
+ req->next_prio = NULL;
+ }
+ }
+ }
+ else
+ {
+ /* Find the beginning of the list of all requests for this
+ descriptor. */
+ req = __aio_find_req_fd (fildes);
+
+ /* If any request is worked on by a thread it must be the first.
+ So either we can delete all requests or all but the first. */
+ if (req != NULL)
+ {
+ if (req->running == allocated)
+ {
+ struct requestlist *old = req;
+ req = req->next_prio;
+ old->next_prio = NULL;
+
+ result = AIO_NOTCANCELED;
+
+ if (req != NULL)
+ __aio_remove_request (old, req, 1);
+ }
+ else
+ {
+ result = AIO_CANCELED;
+
+ /* We can remove the entry. */
+ __aio_remove_request (NULL, req, 1);
+ }
+ }
+ }
+
+ /* Mark requests as canceled and send signal. */
+ while (req != NULL)
+ {
+ struct requestlist *old = req;
+ assert (req->running == yes || req->running == queued);
+ req->aiocbp->aiocb.__error_code = ECANCELED;
+ req->aiocbp->aiocb.__return_value = -1;
+ __aio_notify (req);
+ req = req->next_prio;
+ __aio_free_request (old);
+ }
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+
+ return result;
}
+#ifndef aio_cancel
weak_alias (aio_cancel, aio_cancel64)
-
-stub_warning (aio_cancel)
-stub_warning (aio_cancel64)
+#endif
diff --git a/rt/aio_error.c b/rt/aio_error.c
index 730b64b5e9..ed664ae0ef 100644
--- a/rt/aio_error.c
+++ b/rt/aio_error.c
@@ -28,11 +28,21 @@
/* And undo the hack. */
#undef aio_error64
+#include <aio_misc.h>
+
int
aio_error (const struct aiocb *aiocbp)
{
- return aiocbp->__error_code;
+ int ret;
+
+ /* Acquire the mutex to make sure all operations for this request are
+ complete. */
+ pthread_mutex_lock(&__aio_requests_mutex);
+ ret = aiocbp->__error_code;
+ pthread_mutex_unlock(&__aio_requests_mutex);
+
+ return ret;
}
weak_alias (aio_error, aio_error64)
diff --git a/rt/aio_fsync.c b/rt/aio_fsync.c
index 86727246f8..5a52e2fec0 100644
--- a/rt/aio_fsync.c
+++ b/rt/aio_fsync.c
@@ -1,6 +1,7 @@
-/* Synchronize I/O in given file descriptor. Stub version.
- Copyright (C) 2001-2021 Free Software Foundation, Inc.
+/* Synchronize I/O in given file descriptor.
+ Copyright (C) 1997-2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -26,24 +27,31 @@
#include <aio.h>
/* And undo the hack. */
#undef aio_fsync64
-
#include <errno.h>
#include <fcntl.h>
+#include <aio_misc.h>
+
+
int
aio_fsync (int op, struct aiocb *aiocbp)
{
- if (op != O_SYNC && op != O_DSYNC)
+ if (op != O_DSYNC && __builtin_expect (op != O_SYNC, 0))
{
__set_errno (EINVAL);
return -1;
}
- __set_errno (ENOSYS);
- return -1;
+ /* Verify that this is an open file descriptor. */
+ if (__glibc_unlikely (fcntl (aiocbp->aio_fildes, F_GETFL) == -1))
+ {
+ __set_errno (EBADF);
+ return -1;
+ }
+
+ return (__aio_enqueue_request ((aiocb_union *) aiocbp,
+ op == O_SYNC ? LIO_SYNC : LIO_DSYNC) == NULL
+ ? -1 : 0);
}
weak_alias (aio_fsync, aio_fsync64)
-
-stub_warning (aio_fsync)
-stub_warning (aio_fsync64)
diff --git a/rt/aio_misc.c b/rt/aio_misc.c
index 2332f3ed53..b95f07d9d3 100644
--- a/rt/aio_misc.c
+++ b/rt/aio_misc.c
@@ -1,6 +1,7 @@
-/* Handle general operations. Stub version.
- Copyright (C) 2001-2021 Free Software Foundation, Inc.
+/* Handle general operations.
+ Copyright (C) 1997-2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -17,12 +18,704 @@
<https://www.gnu.org/licenses/>. */
#include <aio.h>
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/time.h>
#include <aio_misc.h>
-/* This file is for internal code needed by the aio_* implementation. */
+#ifndef aio_create_helper_thread
+# define aio_create_helper_thread __aio_create_helper_thread
+extern inline int
+__aio_create_helper_thread (pthread_t *threadp, void *(*tf) (void *), void *arg)
+{
+ pthread_attr_t attr;
+
+ /* Make sure the thread is created detached. */
+ pthread_attr_init (&attr);
+ pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+
+ int ret = pthread_create (threadp, &attr, tf, arg);
+
+ (void) pthread_attr_destroy (&attr);
+ return ret;
+}
+#endif
+
+static void add_request_to_runlist (struct requestlist *newrequest);
+
+/* Pool of request list entries. */
+static struct requestlist **pool;
+
+/* Number of total and allocated pool entries. */
+static size_t pool_max_size;
+static size_t pool_size;
+
+/* We implement a two dimensional array but allocate each row separately.
+ The macro below determines how many entries should be used per row.
+ It should better be a power of two. */
+#define ENTRIES_PER_ROW 32
+
+/* How many rows we allocate at once. */
+#define ROWS_STEP 8
+
+/* List of available entries. */
+static struct requestlist *freelist;
+
+/* List of request waiting to be processed. */
+static struct requestlist *runlist;
+
+/* Structure list of all currently processed requests. */
+static struct requestlist *requests;
+
+/* Number of threads currently running. */
+static int nthreads;
+
+/* Number of threads waiting for work to arrive. */
+static int idle_thread_count;
+
+
+/* These are the values used to optimize the use of AIO. The user can
+ overwrite them by using the `aio_init' function. */
+static struct aioinit optim =
+{
+ 20, /* int aio_threads; Maximal number of threads. */
+ 64, /* int aio_num; Number of expected simultaneous requests. */
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0
+};
+
+
+/* Since the list is global we need a mutex protecting it. */
+pthread_mutex_t __aio_requests_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+
+/* When you add a request to the list and there are idle threads present,
+ you signal this condition variable. When a thread finishes work, it waits
+ on this condition variable for a time before it actually exits. */
+pthread_cond_t __aio_new_request_notification = PTHREAD_COND_INITIALIZER;
+
+
+/* Functions to handle request list pool. */
+static struct requestlist *
+get_elem (void)
+{
+ struct requestlist *result;
+
+ if (freelist == NULL)
+ {
+ struct requestlist *new_row;
+ int cnt;
+
+ assert (sizeof (struct aiocb) == sizeof (struct aiocb64));
+
+ if (pool_size + 1 >= pool_max_size)
+ {
+ size_t new_max_size = pool_max_size + ROWS_STEP;
+ struct requestlist **new_tab;
+
+ new_tab = (struct requestlist **)
+ realloc (pool, new_max_size * sizeof (struct requestlist *));
+
+ if (new_tab == NULL)
+ return NULL;
+
+ pool_max_size = new_max_size;
+ pool = new_tab;
+ }
+
+ /* Allocate the new row. */
+ cnt = pool_size == 0 ? optim.aio_num : ENTRIES_PER_ROW;
+ new_row = (struct requestlist *) calloc (cnt,
+ sizeof (struct requestlist));
+ if (new_row == NULL)
+ return NULL;
+
+ pool[pool_size++] = new_row;
+
+ /* Put all the new entries in the freelist. */
+ do
+ {
+ new_row->next_prio = freelist;
+ freelist = new_row++;
+ }
+ while (--cnt > 0);
+ }
+
+ result = freelist;
+ freelist = freelist->next_prio;
+
+ return result;
+}
+
+
+void
+__aio_free_request (struct requestlist *elem)
+{
+ elem->running = no;
+ elem->next_prio = freelist;
+ freelist = elem;
+}
+
+
+struct requestlist *
+__aio_find_req (aiocb_union *elem)
+{
+ struct requestlist *runp = requests;
+ int fildes = elem->aiocb.aio_fildes;
+
+ while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
+ runp = runp->next_fd;
+
+ if (runp != NULL)
+ {
+ if (runp->aiocbp->aiocb.aio_fildes != fildes)
+ runp = NULL;
+ else
+ while (runp != NULL && runp->aiocbp != elem)
+ runp = runp->next_prio;
+ }
+
+ return runp;
+}
+
+
+struct requestlist *
+__aio_find_req_fd (int fildes)
+{
+ struct requestlist *runp = requests;
+
+ while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
+ runp = runp->next_fd;
+
+ return (runp != NULL && runp->aiocbp->aiocb.aio_fildes == fildes
+ ? runp : NULL);
+}
+
+
+void
+__aio_remove_request (struct requestlist *last, struct requestlist *req,
+ int all)
+{
+ assert (req->running == yes || req->running == queued
+ || req->running == done);
+
+ if (last != NULL)
+ last->next_prio = all ? NULL : req->next_prio;
+ else
+ {
+ if (all || req->next_prio == NULL)
+ {
+ if (req->last_fd != NULL)
+ req->last_fd->next_fd = req->next_fd;
+ else
+ requests = req->next_fd;
+ if (req->next_fd != NULL)
+ req->next_fd->last_fd = req->last_fd;
+ }
+ else
+ {
+ if (req->last_fd != NULL)
+ req->last_fd->next_fd = req->next_prio;
+ else
+ requests = req->next_prio;
+
+ if (req->next_fd != NULL)
+ req->next_fd->last_fd = req->next_prio;
+
+ req->next_prio->last_fd = req->last_fd;
+ req->next_prio->next_fd = req->next_fd;
+
+ /* Mark this entry as runnable. */
+ req->next_prio->running = yes;
+ }
+
+ if (req->running == yes)
+ {
+ struct requestlist *runp = runlist;
+
+ last = NULL;
+ while (runp != NULL)
+ {
+ if (runp == req)
+ {
+ if (last == NULL)
+ runlist = runp->next_run;
+ else
+ last->next_run = runp->next_run;
+ break;
+ }
+ last = runp;
+ runp = runp->next_run;
+ }
+ }
+ }
+}
+
+
+/* The thread handler. */
+static void *handle_fildes_io (void *arg);
+
+
+/* User optimization. */
void
__aio_init (const struct aioinit *init)
{
+ /* Get the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ /* Only allow writing new values if the table is not yet allocated. */
+ if (pool == NULL)
+ {
+ optim.aio_threads = init->aio_threads < 1 ? 1 : init->aio_threads;
+ assert (powerof2 (ENTRIES_PER_ROW));
+ optim.aio_num = (init->aio_num < ENTRIES_PER_ROW
+ ? ENTRIES_PER_ROW
+ : init->aio_num & ~(ENTRIES_PER_ROW - 1));
+ }
+
+ if (init->aio_idle_time != 0)
+ optim.aio_idle_time = init->aio_idle_time;
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
}
weak_alias (__aio_init, aio_init)
+
+
+/* The main function of the async I/O handling. It enqueues requests
+ and if necessary starts and handles threads. */
+struct requestlist *
+__aio_enqueue_request (aiocb_union *aiocbp, int operation)
+{
+ int result = 0;
+ int policy, prio;
+ struct sched_param param;
+ struct requestlist *last, *runp, *newp;
+ int running = no;
+
+ if (operation == LIO_SYNC || operation == LIO_DSYNC)
+ aiocbp->aiocb.aio_reqprio = 0;
+ else if (aiocbp->aiocb.aio_reqprio < 0
+#ifdef AIO_PRIO_DELTA_MAX
+ || aiocbp->aiocb.aio_reqprio > AIO_PRIO_DELTA_MAX
+#endif
+ )
+ {
+ /* Invalid priority value. */
+ __set_errno (EINVAL);
+ aiocbp->aiocb.__error_code = EINVAL;
+ aiocbp->aiocb.__return_value = -1;
+ return NULL;
+ }
+
+ /* Compute priority for this request. */
+ pthread_getschedparam (pthread_self (), &policy, &param);
+ prio = param.sched_priority - aiocbp->aiocb.aio_reqprio;
+
+ /* Get the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ last = NULL;
+ runp = requests;
+ /* First look whether the current file descriptor is currently
+ worked with. */
+ while (runp != NULL
+ && runp->aiocbp->aiocb.aio_fildes < aiocbp->aiocb.aio_fildes)
+ {
+ last = runp;
+ runp = runp->next_fd;
+ }
+
+ /* Get a new element for the waiting list. */
+ newp = get_elem ();
+ if (newp == NULL)
+ {
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ __set_errno (EAGAIN);
+ return NULL;
+ }
+ newp->aiocbp = aiocbp;
+ newp->waiting = NULL;
+
+ aiocbp->aiocb.__abs_prio = prio;
+ aiocbp->aiocb.__policy = policy;
+ aiocbp->aiocb.aio_lio_opcode = operation;
+ aiocbp->aiocb.__error_code = EINPROGRESS;
+ aiocbp->aiocb.__return_value = 0;
+
+ if (runp != NULL
+ && runp->aiocbp->aiocb.aio_fildes == aiocbp->aiocb.aio_fildes)
+ {
+ /* The current file descriptor is worked on. It makes no sense
+ to start another thread since this new thread would fight
+ with the running thread for the resources. But we also cannot
+ say that the thread processing this descriptor shall immediately
+ after finishing the current job process this request if there
+ are other threads in the running queue which have a higher
+ priority. */
+
+ /* Simply enqueue it after the running one according to the
+ priority. */
+ last = NULL;
+ while (runp->next_prio != NULL
+ && runp->next_prio->aiocbp->aiocb.__abs_prio >= prio)
+ {
+ last = runp;
+ runp = runp->next_prio;
+ }
+
+ newp->next_prio = runp->next_prio;
+ runp->next_prio = newp;
+
+ running = queued;
+ }
+ else
+ {
+ running = yes;
+ /* Enqueue this request for a new descriptor. */
+ if (last == NULL)
+ {
+ newp->last_fd = NULL;
+ newp->next_fd = requests;
+ if (requests != NULL)
+ requests->last_fd = newp;
+ requests = newp;
+ }
+ else
+ {
+ newp->next_fd = last->next_fd;
+ newp->last_fd = last;
+ last-&g