
view tools/blktap/drivers/tapaio.c @ 16703:003542d9ab77

tapaio: check return value from read()

In tools/blktap/drivers/tapaio.c there is a call to read(2) whose
return value is not checked. The attached patch attempts to do
something vaguely sensible in cases of error.

Fully comprehensive error handling in this area would be quite tough
to introduce now, but at least with this change, when things go wrong,
you stand a chance of getting some information about what happened.

Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Dec 27 12:28:58 2007 +0000 (2007-12-27)
parents eeeb77195ac2
children 89ee92328720
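
The checks the patch adds live in tap_aio_get_events() in the file listing
below. As a rough illustration of the same pattern, here is a minimal sketch
of checking a read() of an int-sized message from a pipe; the helper name
read_count and the fprintf() logging are illustrative only, not part of the
patch:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Sketch only: distinguish transient errors, hard errors and short reads
 * when pulling a small fixed-size message off a pipe. */
static int read_count(int fd, int *out)
{
        int val;
        ssize_t r = read(fd, &val, sizeof(val));

        if (r < 0) {
                if (errno == EAGAIN || errno == EINTR)
                        return 0;       /* transient: try again later */
                fprintf(stderr, "read failed: %s\n", strerror(errno));
                return 0;               /* hard error: log and carry on */
        }
        if (r != sizeof(val)) {
                /* Should not happen for a pipe: writes of sizeof(int) bytes
                 * are atomic because sizeof(int) <= PIPE_BUF. */
                fprintf(stderr, "short read: %zd\n", r);
                return 0;
        }
        *out = val;
        return 1;                       /* *out holds the value read */
}

As in tap_aio_get_events() below, the short-read branch should be unreachable
for messages no larger than PIPE_BUF, so logging and carrying on is preferred
to asserting. The full file at this changeset follows.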
/*
 * Copyright (c) 2006 Andrew Warfield and Julian Chesterfield
 * Copyright (c) 2007 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tapaio.h"
#include "tapdisk.h"
#include <unistd.h>
#include <errno.h>
#include <string.h>

/**
 * We used a kernel patch to return an fd associated with the AIO context
 * so that we can concurrently poll on synchronous and async descriptors.
 * This is signalled by passing 1 as the io context to io_setup.
 */
#define REQUEST_ASYNC_FD 1

/*
 * If we don't have any way to do epoll on aio events in a normal kernel,
 * wait for aio events in a separate thread and return completion status
 * via a pipe that can be waited on normally.
 *
 * To keep locking problems between the completion thread and the submit
 * thread to a minimum, there's a handshake which allows only one thread
 * to be doing work on the completion queue at a time:
 *
 * 1) main thread sends completion thread a command via the command pipe;
 * 2) completion thread waits for aio events and returns the number
 *    received on the completion pipe;
 * 3) main thread processes the received ctx->aio_events events;
 * 4) loop back to 1) to let the completion thread refill the aio_events
 *    buffer.
 *
 * This workaround needs to disappear once the kernel provides a single
 * mechanism for waiting on both aio and normal fd wakeups.
 */
static void *
tap_aio_completion_thread(void *arg)
{
        tap_aio_context_t *ctx = (tap_aio_context_t *) arg;
        int command;
        int nr_events;
        int rc;

        while (1) {
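                /* Wait for the main thread's go-ahead on the command pipe
                 * (step 1 of the handshake above).  The command value itself
                 * is unused, and rc is overwritten by io_getevents() below. */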
                rc = read(ctx->command_fd[0], &command, sizeof(command));

                do {
                        rc = io_getevents(ctx->aio_ctx, 1,
                                          ctx->max_aio_events, ctx->aio_events,
                                          NULL);
                        if (rc) {
                                nr_events = rc;
                                rc = write(ctx->completion_fd[1], &nr_events,
                                           sizeof(nr_events));
                        }
                } while (!rc);
        }
}

void
tap_aio_continue(tap_aio_context_t *ctx)
{
        int cmd = 0;

        if (!ctx->poll_in_thread)
                return;

        if (write(ctx->command_fd[1], &cmd, sizeof(cmd)) < 0)
                DPRINTF("Cannot write to command pipe\n");
}

int
tap_aio_setup(tap_aio_context_t *ctx,
              struct io_event *aio_events,
              int max_aio_events)
{
        int ret;

        ctx->aio_events = aio_events;
        ctx->max_aio_events = max_aio_events;
        ctx->poll_in_thread = 0;

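        /* Probe for the REQUEST_ASYNC_FD kernel patch described at the top
         * of the file: a patched io_setup() returns a pollable fd for this
         * context, while an unpatched kernel rejects the non-zero context
         * value with -EINVAL and we fall through to the pipe workaround. */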
        ctx->aio_ctx = (io_context_t) REQUEST_ASYNC_FD;
        ret = io_setup(ctx->max_aio_events, &ctx->aio_ctx);
        if (ret < 0 && ret != -EINVAL)
                return ret;
        else if (ret > 0) {
                ctx->pollfd = ret;
                return ctx->pollfd;
        }

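        /* No pollable fd available: set up a normal AIO context and let
         * tap_aio_completion_thread() collect events, signalling completions
         * through the pipe created below so callers can poll ctx->pollfd. */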
        ctx->aio_ctx = (io_context_t) 0;
        ret = io_setup(ctx->max_aio_events, &ctx->aio_ctx);
        if (ret < 0)
                return ret;

        if ((ret = pipe(ctx->command_fd)) < 0) {
                DPRINTF("Unable to create command pipe\n");
                return -1;
        }
        if ((ret = pipe(ctx->completion_fd)) < 0) {
                DPRINTF("Unable to create completion pipe\n");
                return -1;
        }

        if ((ret = pthread_create(&ctx->aio_thread, NULL,
                                  tap_aio_completion_thread, ctx)) != 0) {
                DPRINTF("Unable to create completion thread\n");
                return -1;
        }

        ctx->pollfd = ctx->completion_fd[0];
        ctx->poll_in_thread = 1;

        tap_aio_continue(ctx);

        return 0;
}

int
tap_aio_get_events(tap_aio_context_t *ctx)
{
        int nr_events = 0;

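        /* Without the completion thread, fetch events straight from the AIO
         * context.  Otherwise read the event count that the completion
         * thread wrote to the pipe, checking the read() result (the check
         * this changeset introduces). */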
        if (!ctx->poll_in_thread)
                nr_events = io_getevents(ctx->aio_ctx, 1,
                                ctx->max_aio_events, ctx->aio_events, NULL);
        else {
                int r;
                r = read(ctx->completion_fd[0], &nr_events, sizeof(nr_events));
                if (r < 0) {
                        if (errno == EAGAIN || errno == EINTR)
                                return 0;
                        /* This is pretty bad, we'll probably spin */
                        DPRINTF("Aargh, read completion_fd failed: %s",
                                strerror(errno));
                } else if (r != sizeof(nr_events)) {
                        /* Should never happen because sizeof(nr_events)
                         * fits in the guaranteed atomic pipe write size.
                         * Blundering on is slightly nicer than asserting */
                        DPRINTF("Aargh, read completion_fd short read %d", r);
                }
        }

        return nr_events;
}

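/* Non-blocking check for further completions: with min_nr == 0,
 * io_getevents() returns immediately with whatever is already available. */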
int tap_aio_more_events(tap_aio_context_t *ctx)
{
        return io_getevents(ctx->aio_ctx, 0,
                            ctx->max_aio_events, ctx->aio_events, NULL);
}