path: root/sys/kern/tty_outq.c
/*-
 * Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed under sponsorship from Snow
 * B.V., the Netherlands.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/tty.h>
#include <sys/uio.h>

#include <vm/uma.h>

/*
 * TTY output queue buffering.
 *
 * The previous design of the TTY layer offered so-called clists, which
 * were used for both the input queues and the output queue. This
 * mechanism is similar to the old clists, but it only implements what
 * is needed to buffer output; features such as the quote bits used for
 * parity marking are omitted.
 */
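
/*
 * Queue layout, as used throughout this file (the struct ttyoutq
 * fields referenced here are declared in the TTY headers):
 *
 * The queue is a singly-linked tail queue (to_list) of fixed-size
 * blocks holding TTYOUTQ_DATASIZE bytes each. to_begin and to_end are
 * byte offsets measured from the start of the first block on the
 * list; the bytes in [to_begin, to_end) are the pending output.
 * to_begin always stays within the first block, while to_end may span
 * several blocks. to_lastblock caches the block holding the current
 * write offset and to_nblocks counts the blocks on the list, which
 * bounds how much data the queue accepts. Blocks that have been read
 * completely are recycled to the tail of the list instead of being
 * freed.
 */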

/* Statistics. */
static long ttyoutq_nfast = 0;
SYSCTL_LONG(_kern, OID_AUTO, tty_outq_nfast, CTLFLAG_RD,
	&ttyoutq_nfast, 0, "Unbuffered reads to userspace on output");
static long ttyoutq_nslow = 0;
SYSCTL_LONG(_kern, OID_AUTO, tty_outq_nslow, CTLFLAG_RD,
	&ttyoutq_nslow, 0, "Buffered reads to userspace on output");

struct ttyoutq_block {
	STAILQ_ENTRY(ttyoutq_block) tob_list;
	char	tob_data[TTYOUTQ_DATASIZE];
};

static uma_zone_t ttyoutq_zone;

void
ttyoutq_flush(struct ttyoutq *to)
{

	to->to_begin = 0;
	to->to_end = 0;
}

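/*
 * Resize the output queue so it can hold at least `size' bytes,
 * rounded up to whole blocks. Growing may temporarily drop the TTY
 * lock to allocate blocks; shrinking only frees blocks that currently
 * hold no data.
 *
 * A driver would typically size the queue from the configured line
 * speed, e.g. about a fifth of a second of output at `speed' baud
 * (sketch only; the factor and the `speed' variable are illustrative):
 *
 *	ttyoutq_setsize(&tp->t_outq, tp, speed / 5);
 */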
void
ttyoutq_setsize(struct ttyoutq *to, struct tty *tp, size_t size)
{
	unsigned int nblocks;
	struct ttyoutq_block *tob;

	nblocks = howmany(size, TTYOUTQ_DATASIZE);

	while (nblocks > to->to_nblocks) {
		/*
		 * List is getting bigger.
		 * Add new blocks to the tail of the list.
		 *
		 * We must unlock the TTY temporarily, because we need
		 * to allocate memory. This won't be a problem, because
		 * in the worst case, another thread ends up here, which
		 * may cause us to allocate too many blocks, but this
		 * will be caught by the loop below.
		 */
		tty_unlock(tp);
		tob = uma_zalloc(ttyoutq_zone, M_WAITOK);
		tty_lock(tp);

		if (tty_gone(tp))
			return;

		STAILQ_INSERT_TAIL(&to->to_list, tob, tob_list);
		to->to_nblocks++;
	}

	while (nblocks < to->to_nblocks) {
		/*
		 * List is getting smaller. Remove unused blocks at the
		 * end. As a result, this routine cannot guarantee that the
		 * buffer shrinks all the way down when more blocks would
		 * have to be reclaimed than are currently unused.
		 *
		 * XXX TODO: Two solutions here:
		 * - Throw data away
		 * - Temporarily hit the watermark until enough data has
		 *   been flushed, so we can remove the blocks.
		 */

		if (to->to_end == 0) {
			tob = STAILQ_FIRST(&to->to_list);
			if (tob == NULL)
				break;
			STAILQ_REMOVE_HEAD(&to->to_list, tob_list);
		} else {
			tob = STAILQ_NEXT(to->to_lastblock, tob_list);
			if (tob == NULL)
				break;
			STAILQ_REMOVE_NEXT(&to->to_list, to->to_lastblock, tob_list);
		}
		uma_zfree(ttyoutq_zone, tob);
		to->to_nblocks--;
	}
}

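/*
 * Copy up to `len' bytes of pending output into a kernel buffer,
 * removing them from the queue. The return value is the number of
 * bytes actually copied, which is zero when the queue is empty.
 *
 * A device driver's transmit routine might drain the queue like this
 * (sketch only; `sc' and hw_transmit() are hypothetical):
 *
 *	char ob[64];
 *	size_t ol;
 *
 *	ol = ttyoutq_read(&tp->t_outq, ob, sizeof ob);
 *	if (ol > 0)
 *		hw_transmit(sc, ob, ol);
 */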
size_t
ttyoutq_read(struct ttyoutq *to, void *buf, size_t len)
{
	char *cbuf = buf;

	while (len > 0) {
		struct ttyoutq_block *tob;
		size_t cbegin, cend, clen;

		/* See if there is still data. */
		if (to->to_begin == to->to_end)
			break;
		tob = STAILQ_FIRST(&to->to_list);
		if (tob == NULL)
			break;

		/*
		 * The end address should be the lowest of these three:
		 * - The write pointer
		 * - The blocksize - we can't read beyond the block
		 * - The end address if we could perform the full read
		 */
		cbegin = to->to_begin;
		cend = MIN(MIN(to->to_end, to->to_begin + len),
		    TTYOUTQ_DATASIZE);
		clen = cend - cbegin;

		if (cend == TTYOUTQ_DATASIZE || cend == to->to_end) {
			/* Read the block until the end. */
			STAILQ_REMOVE_HEAD(&to->to_list, tob_list);
			STAILQ_INSERT_TAIL(&to->to_list, tob, tob_list);
			to->to_begin = 0;
			if (to->to_end <= TTYOUTQ_DATASIZE) {
				to->to_end = 0;
			} else {
				to->to_end -= TTYOUTQ_DATASIZE;
			}
		} else {
			/* Read the block partially. */
			to->to_begin += clen;
		}

		/* Copy the data out of the buffers. */
		memcpy(cbuf, tob->tob_data + cbegin, clen);
		cbuf += clen;
		len -= clen;
	}

	return (cbuf - (char *)buf);
}

/*
 * An optimized version of ttyoutq_read() that pseudo-TTY drivers can
 * use to copy data from the outq straight to userspace, instead of
 * staging it in a kernel buffer first.
 *
 * We can only copy data directly if we need to read the entire block
 * back to the user, because we temporarily remove the block from the
 * queue. Otherwise we need to copy it to a temporary buffer first, to
 * make sure data remains in the correct order.
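 *
 * A pseudo-TTY driver's read routine would typically pass its uio
 * straight to this function (sketch only):
 *
 *	error = ttyoutq_read_uio(&tp->t_outq, tp, uio);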
 */
int
ttyoutq_read_uio(struct ttyoutq *to, struct tty *tp, struct uio *uio)
{

	while (uio->uio_resid > 0) {
		int error;
		struct ttyoutq_block *tob;
		size_t cbegin, cend, clen;

		/* See if there is still data. */
		if (to->to_begin == to->to_end)
			return (0);
		tob = STAILQ_FIRST(&to->to_list);
		if (tob == NULL)
			return (0);

		/*
		 * The end address should be the lowest of these three:
		 * - The write pointer
		 * - The blocksize - we can't read beyond the block
		 * - The end address if we could perform the full read
		 */
		cbegin = to->to_begin;
		cend = MIN(MIN(to->to_end, to->to_begin + uio->uio_resid),
		    TTYOUTQ_DATASIZE);
		clen = cend - cbegin;

		/*
		 * We can prevent buffering in some cases:
		 * - We need to read the block until the end.
		 * - We don't need to read the block until the end, but
		 *   there is no data beyond it, which allows us to move
		 *   the write pointer to a new block.
		 */
		if (cend == TTYOUTQ_DATASIZE || cend == to->to_end) {
			atomic_add_long(&ttyoutq_nfast, 1);

			/*
			 * Fast path: zero copy. Remove the first block,
			 * so we can unlock the TTY temporarily.
			 */
			STAILQ_REMOVE_HEAD(&to->to_list, tob_list);
			to->to_nblocks--;
			to->to_begin = 0;
			if (to->to_end <= TTYOUTQ_DATASIZE) {
				to->to_end = 0;
			} else {
				to->to_end -= TTYOUTQ_DATASIZE;
			}

			/* Temporarily unlock and copy the data to userspace. */
			tty_unlock(tp);
			error = uiomove(tob->tob_data + cbegin, clen, uio);
			tty_lock(tp);

			if (tty_gone(tp)) {
				/* We lost the discipline. */
				uma_zfree(ttyoutq_zone, tob);
				return (ENXIO);
			}

			/*
			 * The block can now be re-added to the list.
			 *
			 * XXX: we could free the block here instead when
			 * the queue was shrunk while still in use. See
			 * ttyoutq_setsize().
			 */
			STAILQ_INSERT_TAIL(&to->to_list, tob, tob_list);
			to->to_nblocks++;
			if (error != 0)
				return (error);
		} else {
			char ob[TTYOUTQ_DATASIZE - 1];

			atomic_add_long(&ttyoutq_nslow, 1);

			/*
			 * Slow path: store data in a temporary buffer.
			 */
			memcpy(ob, tob->tob_data + cbegin, clen);
			to->to_begin += clen;
			MPASS(to->to_begin < TTYOUTQ_DATASIZE);

			/* Temporarily unlock and copy the data to userspace. */
			tty_unlock(tp);
			error = uiomove(ob, clen, uio);
			tty_lock(tp);

			if (tty_gone(tp)) {
				/* We lost the discipline. */
				return (ENXIO);
			}

			if (error != 0)
				return (error);
		}
	}

	return (0);
}

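/*
 * Append up to `nbytes' bytes to the output queue. The return value
 * is the number of bytes actually stored; it is smaller than `nbytes'
 * when the queue runs out of blocks. Callers that cannot handle a
 * partial write should use ttyoutq_write_nofrag() instead.
 */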
size_t
ttyoutq_write(struct ttyoutq *to, const void *buf, size_t nbytes)
{
	const char *cbuf = buf;
	struct ttyoutq_block *tob;
	unsigned int boff;
	size_t l;

	while (nbytes > 0) {
		/* Offset in current block. */
		tob = to->to_lastblock;
		boff = to->to_end % TTYOUTQ_DATASIZE;

		if (to->to_end == 0) {
			/* Queue is empty: first use or fully drained. */
			MPASS(to->to_begin == 0);
			tob = to->to_lastblock = STAILQ_FIRST(&to->to_list);
			if (tob == NULL) {
				/* Queue has no blocks. */
				break;
			}
		} else if (boff == 0) {
			/* The last write reached the end of this block. */
			tob = STAILQ_NEXT(tob, tob_list);
			if (tob == NULL) {
				/* We've reached the watermark. */
				break;
			}
			to->to_lastblock = tob;
		}

		/* Don't copy more than was requested. */
		l = MIN(nbytes, TTYOUTQ_DATASIZE - boff);
		MPASS(l > 0);
		memcpy(tob->tob_data + boff, cbuf, l);

		cbuf += l;
		nbytes -= l;
		to->to_end += l;
	}

	return (cbuf - (const char *)buf);
}

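/*
 * All-or-nothing variant of ttyoutq_write(): either the whole buffer
 * is stored, or nothing is stored and -1 is returned when there is
 * not enough room left. This keeps multi-byte sequences from being
 * cut in half by a full queue; a line discipline echoing a control
 * character as a caret sequence, for instance, wants both bytes or
 * neither (sketch only):
 *
 *	char ob[2] = { '^', c + 0x40 };
 *
 *	if (ttyoutq_write_nofrag(&tp->t_outq, ob, sizeof ob) != 0)
 *		return;
 *
 * where a non-zero return means there was no room and the echo is
 * simply dropped.
 */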
int
ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t nbytes)
{
	size_t ret;

	if (ttyoutq_bytesleft(to) < nbytes)
		return (-1);

	/* The write cannot be short; we just checked there is room. */
	ret = ttyoutq_write(to, buf, nbytes);
	MPASS(ret == nbytes);

	return (0);
}

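/*
 * Create the UMA zone that backs all output queue blocks. This runs
 * once at boot, via the SYSINIT below.
 */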
static void
ttyoutq_startup(void *dummy)
{

	ttyoutq_zone = uma_zcreate("ttyoutq", sizeof(struct ttyoutq_block),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

SYSINIT(ttyoutq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyoutq_startup, NULL);