/* XXX to do:
 *
 * Decide where we need splbio ()
 */
/*-
 * Copyright (c) 1997, 1998
 *	Nan Yang Computer Services Limited.  All rights reserved.
 *
 *  This software is distributed under the so-called ``Berkeley
 *  License'':
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Nan Yang Computer
 *      Services Limited.
 * 4. Neither the name of the Company nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * This software is provided ``as is'', and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose are disclaimed.
 * In no event shall the company or contributors be liable for any
 * direct, indirect, incidental, special, exemplary, or consequential
 * damages (including, but not limited to, procurement of substitute
 * goods or services; loss of use, data, or profits; or business
 * interruption) however caused and on any theory of liability, whether
 * in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this software, even if
 * advised of the possibility of such damage.
 *
 * $Id: request.c,v 1.18 1998/08/31 23:45:35 grog Exp grog $
 */

#define REALLYKERNEL
#include "vinumhdr.h"
#include "request.h"
#include <miscfs/specfs/specdev.h>
#include <sys/resourcevar.h>

/* pointer to ioctl p parameter, to save passing it around */
extern struct proc *myproc;

enum requeststatus bre(struct request *rq,
    int plexno,
    daddr_t * diskstart,
    daddr_t diskend);
enum requeststatus bre5(struct request *rq,
    int plexno,
    daddr_t * diskstart,
    daddr_t diskend);
enum requeststatus build_read_request(struct request *rq, int volplexno);
enum requeststatus build_write_request(struct request *rq);
enum requeststatus build_rq_buffer(struct rqelement *rqe, struct plex *plex);
void freerq(struct request *rq);
void free_rqg(struct rqgroup *rqg);
int find_alternate_sd(struct request *rq);
int check_range_covered(struct request *);
void complete_rqe(struct buf *bp);
void complete_raid5_write(struct rqelement *);
int abortrequest(struct request *rq, int error);
void sdio(struct buf *bp);
void sdio_done(struct buf *bp);
int vinum_bounds_check(struct buf *bp, struct volume *vol);
caddr_t allocdatabuf(struct rqelement *rqe);
void freedatabuf(struct rqelement *rqe);

#ifdef DEBUG
struct rqinfo rqinfo[RQINFO_SIZE];
struct rqinfo *rqip = rqinfo;

void 
logrq(enum rqinfo_type type, union rqinfou info, struct buf *ubp)
{
    BROKEN_GDB;
    int s = splhigh();

    vinum_conf.rqipp = &rqip;				    /* XXX for broken gdb */
    vinum_conf.rqinfop = rqinfo;			    /* XXX for broken gdb */

#if __FreeBSD__ < 3
    rqip->timestamp = time;				    /* when did this happen? */
#else
    microtime(&rqip->timestamp);			    /* when did this happen? */
#endif
    rqip->type = type;
    rqip->bp = ubp;					    /* user buffer */
    switch (type) {
    case loginfo_user_bp:
    case loginfo_user_bpl:
	bcopy(info.bp, &rqip->info.b, sizeof(struct buf));
	break;

    case loginfo_iodone:
    case loginfo_rqe:
    case loginfo_raid5_data:
    case loginfo_raid5_parity:
	bcopy(info.rqe, &rqip->info.rqe, sizeof(struct rqelement));
	break;

    case loginfo_unused:
	break;
    }
    rqip++;
    if (rqip >= &rqinfo[RQINFO_SIZE])			    /* wrap around */
	rqip = rqinfo;
    splx(s);
}

#endif

void 
vinumstrategy(struct buf *bp)
{
    BROKEN_GDB;
    int volno;
    struct volume *vol = NULL;
    int s;
    struct devcode *device = (struct devcode *) &bp->b_dev; /* decode device number */
    enum requeststatus status;

    /* We may have changed the configuration in
     * an interrupt context.  Update it now.  It
     * could change again, so do it in a loop.
     * XXX this is broken and contains a race condition.
     * The correct way is to hand it off to the Vinum
     * daemon, but I haven't found a name for it yet */
    while (vinum_conf.flags & VF_DIRTYCONFIG) {		    /* config is dirty, save it now */
	vinum_conf.flags &= ~VF_DIRTYCONFIG;		    /* turn it off */
	save_config();
    }

    switch (device->type) {
    case VINUM_SD_TYPE:
	sdio(bp);
	return;

	/* In fact, vinum doesn't handle drives: they're
	 * handled directly by the disk drivers */
    case VINUM_DRIVE_TYPE:
    default:
	bp->b_error = EIO;				    /* I/O error */
	bp->b_flags |= B_ERROR;
	biodone(bp);
	return;

    case VINUM_VOLUME_TYPE:				    /* volume I/O */
	volno = VOLNO(bp->b_dev);
	vol = &VOL[volno];
	if (vol->state != volume_up) {			    /* can't access this volume */
	    bp->b_error = EIO;				    /* I/O error */
	    bp->b_flags |= B_ERROR;
	    biodone(bp);
	    return;
	}
	if (vinum_bounds_check(bp, vol) <= 0) {		    /* don't like them bounds */
	    biodone(bp);				    /* have nothing to do with this */
	    return;
	}
	/* FALLTHROUGH */
	/* Plex I/O is pretty much the same as volume I/O
	 * for a single plex.  Indicate this by passing a NULL
	 * pointer (set above) for the volume */
    case VINUM_PLEX_TYPE:
	bp->b_resid = bp->b_bcount;			    /* transfer everything */
	vinumstart(bp, 0);
	return;
    }
}

/* Start a transfer.  Return -1 on error,
 * 0 if OK, 1 if we need to retry.
 * Parameter reviveok is set when doing
 * transfers for revives: it allows transfers to
 * be started immediately when a revive is in
 * progress.  During revive, normal transfers
 * are queued if they share address space with
 * a currently active revive operation. */
int 
vinumstart(struct buf *bp, int reviveok)
{
    BROKEN_GDB;
    int plexno;
    int maxplex;					    /* maximum number of plexes to handle */
    struct volume *vol;
    struct rqgroup *rqg;				    /* current plex's requests */
    struct rqelement *rqe;				    /* individual element */
    struct request *rq;					    /* build up our request here */
    int rqno;						    /* index in request list */
    enum requeststatus status;

#if DEBUG
    if (debug & DEBUG_LASTREQS)
	logrq(loginfo_user_bp, (union rqinfou) bp, bp);
#endif

    /* XXX In these routines, we're assuming that
     * we will always be called with bp->b_bcount
     * which is a multiple of the sector size.  This
     * is a reasonable assumption, since we are only
     * called from system routines.  Should we check
     * anyway? */

    if ((bp->b_bcount % DEV_BSIZE) != 0) {		    /* bad length */
	bp->b_error = EINVAL;				    /* invalid size */
	bp->b_flags |= B_ERROR;
	biodone(bp);
	return -1;
    }
    rq = (struct request *) Malloc(sizeof(struct request)); /* allocate a request struct */
    if (rq == NULL) {					    /* can't do it */
	bp->b_error = ENOMEM;				    /* can't get memory */
	bp->b_flags |= B_ERROR;
	biodone(bp);
	return -1;
    }
    bzero(rq, sizeof(struct request));

    /* Note the volume ID.  This can be NULL, which
     * the request building functions use as an
     * indication for single plex I/O */
    rq->bp = bp;					    /* and the user buffer struct */

    if (DEVTYPE(bp->b_dev) == VINUM_VOLUME_TYPE) {	    /* it's a volume, */
	rq->volplex.volno = VOLNO(bp->b_dev);		    /* get the volume number */
	vol = &VOL[rq->volplex.volno];			    /* and point to it */
	vol->active++;					    /* one more active request */
	maxplex = vol->plexes;				    /* consider all its plexes */
    } else {
	vol = NULL;					    /* no volume */
	rq->volplex.plexno = PLEXNO(bp->b_dev);		    /* point to the plex */
	rq->isplex = 1;					    /* note that it's a plex */
	maxplex = 1;					    /* just the one plex */
    }

    if (bp->b_flags & B_READ) {
	/* This is a read request.  Decide
	 * which plex to read from.
	 *
	 * There's a potential race condition here,
	 * since we're not locked, and we could end
	 * up multiply incrementing the round-robin
	 * counter.  This doesn't have any serious
	 * effects, however. */
	if (vol != NULL) {
	    vol->reads++;
	    vol->bytes_read += bp->b_bcount;
	    plexno = vol->preferred_plex;		    /* get the plex to use */
	    if (plexno < 0) {				    /* round robin */
		plexno = vol->last_plex_read;
		vol->last_plex_read++;
		if (vol->last_plex_read == vol->plexes)	    /* got to the end? */
		    vol->last_plex_read = 0;		    /* wrap around */
	    }
	    status = build_read_request(rq, plexno);	    /* build a request */
	} else {
	    daddr_t diskaddr = bp->b_blkno;		    /* start offset of transfer */
	    status = bre(rq,				    /* build a request list */
		rq->volplex.plexno,
		&diskaddr,
		diskaddr + (bp->b_bcount / DEV_BSIZE));
	}

	if ((status > REQUEST_RECOVERED)		    /* can't satisfy it */
	||(bp->b_flags & B_DONE)) {			    /* XXX shouldn't get this without bad status */
	    if (status == REQUEST_DOWN) {		    /* not enough subdisks */
		bp->b_error = EIO;			    /* I/O error */
		bp->b_flags |= B_ERROR;
	    }
	    biodone(bp);
	    freerq(rq);
	    return -1;
	}
	return launch_requests(rq, reviveok);	    /* now start the requests if we can */
    } else
	/* This is a write operation.  We write to all
	 * plexes.  If this is a RAID 5 plex, we must also
	 * update the parity stripe. */
    {
	if (vol != NULL) {
	    vol->writes++;
	    vol->bytes_written += bp->b_bcount;
	    status = build_write_request(rq);		    /* build requests for all plexes */
	} else {					    /* plex I/O */
	    daddr_t diskstart;

	    diskstart = bp->b_blkno;			    /* start offset of transfer */
	    status = bre(rq,
		PLEXNO(bp->b_dev),
		&diskstart,
		bp->b_blkno + (bp->b_bcount / DEV_BSIZE));  /* build requests for the plex */
	}
	if ((status > REQUEST_RECOVERED)		    /* can't satisfy it */
	||(bp->b_flags & B_DONE)) {			    /* XXX shouldn't get this without bad status */
	    if (status == REQUEST_DOWN) {		    /* not enough subdisks */
		bp->b_error = EIO;			    /* I/O error */
		bp->b_flags |= B_ERROR;
	    }
	    if ((bp->b_flags & B_DONE) == 0)
		biodone(bp);
	    freerq(rq);
	    return -1;
	}
	return launch_requests(rq, reviveok);		    /* start the requests */
    }
}

/* Call the low-level strategy routines to
 * perform the requests in a struct request */
int 
launch_requests(struct request *rq, int reviveok)
{
    struct rqgroup *rqg;
    int rqno;						    /* loop index */
    struct rqelement *rqe;				    /* current element */
    int s;

    /* First find out whether we're reviving, and the
     * request contains a conflict.  If so, we hang
     * the request off plex->waitlist of the first
     * plex we find which is reviving */
    if ((rq->flags & XFR_REVIVECONFLICT)		    /* possible revive conflict */
    &&(!reviveok)) {					    /* and we don't want to do it now, */
	struct volume *vol = &VOL[VOLNO(rq->bp->b_dev)];
	struct plex *plex;
	int plexno;

	for (plexno = 0; plexno < vol->plexes; plexno++) {  /* find the reviving plex */
	    plex = &PLEX[vol->plex[plexno]];
	    if (plex->state == plex_reviving)		    /* found it */
		break;
	}
	if (plexno < vol->plexes) {			    /* found it? */
	    if (plex->waitlist == NULL)			    /* list is empty, */
		plex->waitlist = rq;			    /* we're the first */
	    else {
		struct request *waitlist = plex->waitlist;  /* point to the waiting list */

		while (waitlist->next != NULL)		    /* find the end */
		    waitlist = waitlist->next;
		waitlist->next = rq;			    /* hook our request there */
	    }
	    return 0;					    /* and get out of here */
	} else						    /* bad vinum, bad */
	    printf("vinum: can't find reviving plex for volume %s\n", vol->name);
    }
    rq->active = 0;					    /* nothing yet */
    /* XXX This is probably due to a bug */
    if (rq->rqg == NULL) {				    /* no request */
	abortrequest(rq, EINVAL);
	return -1;
    }
#if DEBUG
    if (debug & DEBUG_ADDRESSES)
	printf("Request: %x\n%s dev 0x%x, offset 0x%x, length %ld\n",
	    (u_int) rq,
	    rq->bp->b_flags & B_READ ? "Read" : "Write",
	    rq->bp->b_dev,
	    rq->bp->b_blkno,
	    rq->bp->b_bcount);				    /* XXX */
    vinum_conf.lastrq = (int) rq;
    vinum_conf.lastbuf = rq->bp;
    if (debug & DEBUG_LASTREQS)
	logrq(loginfo_user_bpl, (union rqinfou) rq->bp, rq->bp);
#endif
    for (rqg = rq->rqg; rqg != NULL; rqg = rqg->next) {	    /* through the whole request chain */
	rqg->active = rqg->count;			    /* they're all active */
	rq->active++;					    /* one more active request group */
	for (rqno = 0; rqno < rqg->count; rqno++) {
	    rqe = &rqg->rqe[rqno];
	    if (rqe->flags & XFR_BAD_SUBDISK)		    /* this subdisk is bad, */
		rqg->active--;				    /* one less active request */
	    else {
		struct drive *drive = &DRIVE[rqe->driveno]; /* drive to access */
		if ((rqe->b.b_flags & B_READ) == 0)
		    rqe->b.b_vp->v_numoutput++;		    /* one more output going */
#if DEBUG
		if (debug & DEBUG_ADDRESSES)
		    printf("  %s dev 0x%x, sd %d, offset 0x%x, devoffset 0x%x, length %ld\n",
			rqe->b.b_flags & B_READ ? "Read" : "Write",
			rqe->b.b_dev,
			rqe->sdno,
			(u_int) (rqe->b.b_blkno - SD[rqe->sdno].driveoffset),
			rqe->b.b_blkno,
			rqe->b.b_bcount);		    /* XXX */
		if (debug & DEBUG_NUMOUTPUT)
		    printf("  vinumstart sd %d numoutput %ld\n",
			rqe->sdno,
			rqe->b.b_vp->v_numoutput);
		if (debug & DEBUG_LASTREQS)
		    logrq(loginfo_rqe, (union rqinfou) rqe, rq->bp);
#endif
		/* fire off the request */
		s = splbio();
		(*bdevsw[major(rqe->b.b_dev)]->d_strategy) (&rqe->b);
		splx(s);
	    }
	    /* XXX Do we need caching?  Think about this more */
	}
    }
    return 0;
}

/* define the low-level requests needed to perform a
 * high-level I/O operation for a specific plex 'plexno'.
 *
 * Return a requeststatus: REQUEST_OK if all subdisks involved in the
 * request are up, the status from checksdstate if some are not, and
 * REQUEST_EOF if the request runs at least partially beyond the
 * bounds of the subdisks.
 *
 * Modify the pointer *diskaddr to point to the end address.  On
 * read, return on the first bad subdisk, so that the caller
 * (build_read_request) can try alternatives.
 *
 * On entry to this routine, the rqg structures are not assigned.  The
 * assignment is performed by expandrq().  Strictly speaking, the
 * elements rqe->sdno of all entries should be set to -1, since 0
 * (from bzero) is a valid subdisk number.  We avoid this problem by
 * initializing the ones we use, and not looking at the others (index
 * >= rqg->requests).
 */
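/* An illustrative walk-through (figures invented for this sketch): a
 * 16-sector read starting at sector 120 of a concatenated plex whose
 * first subdisk covers sectors 0 to 127 builds one request group for
 * sectors 120 to 127 of that subdisk, advances *diskaddr to 128, then
 * builds a second group for sectors 128 to 135 of the next subdisk,
 * leaving *diskaddr equal to diskend on return. */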
enum requeststatus 
bre(struct request *rq,
    int plexno,
    daddr_t * diskaddr,
    daddr_t diskend)
{
    BROKEN_GDB;
    int sdno;
    struct sd *sd;
    struct rqgroup *rqg;
    struct buf *bp;					    /* user's bp */
    struct plex *plex;
    enum requeststatus status;				    /* return value */
    daddr_t plexoffset;					    /* offset of transfer in plex */
    daddr_t stripebase;					    /* base address of stripe (1st subdisk) */
    daddr_t stripeoffset;				    /* offset in stripe */
    daddr_t blockoffset;				    /* offset in stripe on subdisk */
    struct rqelement *rqe;				    /* point to this request information */
    daddr_t diskstart = *diskaddr;			    /* remember where this transfer starts */

    bp = rq->bp;					    /* buffer pointer */
    status = REQUEST_OK;				    /* return value: OK until proven otherwise */
    plex = &PLEX[plexno];				    /* point to the plex */

    switch (plex->organization) {
    case plex_concat:
	for (sdno = 0; sdno < plex->subdisks; sdno++) {
	    sd = &SD[plex->sdnos[sdno]];
	    if ((*diskaddr < (sd->plexoffset + sd->sectors)) /* The request starts before the end of this */
	    &&(diskend > sd->plexoffset)) {		    /* subdisk and ends after the start of this sd */
		if ((sd->state != sd_up) || (plex->state != plex_up)) {
		    enum requeststatus s;

		    s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
		    if (s)				    /* give up? */
			return s;			    /* yup */
		}
		rqg = allocrqg(rq, 1);			    /* space for the request */
		if (rqg == NULL) {			    /* malloc failed */
		    bp->b_flags |= B_ERROR;
		    bp->b_error = ENOMEM;
		    biodone(bp);
		    return REQUEST_ENOMEM;
		}
		rqg->plexno = plexno;

		rqe = &rqg->rqe[0];			    /* point to the element */
		rqe->rqg = rqg;				    /* group */
		rqe->sdno = sd->sdno;			    /* put in the subdisk number */
		plexoffset = max(sd->plexoffset, *diskaddr); /* start offset in plex */
		rqe->sdoffset = plexoffset - sd->plexoffset; /* start offset in subdisk */
		rqe->useroffset = plexoffset - diskstart;   /* start offset in user buffer */
		rqe->dataoffset = 0;
		rqe->datalen = min(diskend - *diskaddr,	    /* number of sectors to transfer in this sd */
		    sd->sectors - rqe->sdoffset);
		rqe->groupoffset = 0;			    /* no groups for concatenated plexes */
		rqe->grouplen = 0;
		rqe->buflen = rqe->datalen;		    /* buffer length is data buffer length */
		rqe->flags = 0;
		rqe->driveno = sd->driveno;
		*diskaddr += rqe->datalen;		    /* bump the address */
		if (build_rq_buffer(rqe, plex)) {	    /* build the buffer */
		    deallocrqg(rqg);
		    bp->b_flags |= B_ERROR;
		    bp->b_error = ENOMEM;
		    biodone(bp);
		    return REQUEST_ENOMEM;		    /* can't do it */
		}
	    }
	    if (*diskaddr >= diskend)			    /* we're finished, */
		break;					    /* get out of here */
	}
	break;

    case plex_striped:
	{
	    while (*diskaddr < diskend) {		    /* until we get it all sorted out */
		/* The offset of the start address from
		 * the start of the stripe */
		stripeoffset = *diskaddr % (plex->stripesize * plex->subdisks);

		/* The plex-relative address of the
		 * start of the stripe */
		stripebase = *diskaddr - stripeoffset;

		/* The number of the subdisk in which
		 * the start is located */
		sdno = stripeoffset / plex->stripesize;

		/* The offset from the beginning of the stripe
		 * on this subdisk */
		blockoffset = stripeoffset % plex->stripesize;
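		/* A worked example (figures invented for illustration):
		 * with stripesize 128 and 4 subdisks, *diskaddr = 1000
		 * gives stripeoffset = 1000 % 512 = 488, stripebase =
		 * 512, sdno = 488 / 128 = 3 and blockoffset = 488 %
		 * 128 = 104, so the transfer starts at offset
		 * 512 / 4 + 104 = 232 of the fourth subdisk. */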

		sd = &SD[plex->sdnos[sdno]];		    /* the subdisk in question */
		if ((sd->state != sd_up) || (plex->state != plex_up)) {
		    enum requeststatus s;

		    s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
		    if (s)				    /* give up? */
			return s;			    /* yup */
		}
		rqg = allocrqg(rq, 1);			    /* space for the request */
		if (rqg == NULL) {			    /* malloc failed */
		    bp->b_flags |= B_ERROR;
		    bp->b_error = ENOMEM;
		    biodone(bp);
		    return REQUEST_ENOMEM;
		}
		rqg->plexno = plexno;

		rqe = &rqg->rqe[0];			    /* point to the element */
		rqe->rqg = rqg;
		rqe->sdoffset = stripebase / plex->subdisks + blockoffset; /* start offset in this subdisk */
		rqe->useroffset = *diskaddr - diskstart;    /* The offset of the start in the user buffer */
		rqe->dataoffset = 0;
		rqe->datalen = min(diskend - *diskaddr,	    /* the amount remaining to transfer */
		    plex->stripesize - blockoffset);	    /* and the amount left in this stripe */
		rqe->groupoffset = 0;			    /* no groups for striped plexes */
		rqe->grouplen = 0;
		rqe->buflen = rqe->datalen;		    /* buffer length is data buffer length */
		rqe->flags = 0;
		rqe->sdno = sd->sdno;			    /* put in the subdisk number */
		rqe->driveno = sd->driveno;

		if (rqe->sdoffset >= sd->sectors) {	    /* starts beyond the end of the subdisk? */
		    deallocrqg(rqg);
		    return REQUEST_EOF;
		} else if (rqe->sdoffset + rqe->datalen > sd->sectors) /* ends beyond the end of the subdisk? */
		    rqe->datalen = sd->sectors - rqe->sdoffset;	/* yes, truncate */

		if (build_rq_buffer(rqe, plex)) {	    /* build the buffer */
		    deallocrqg(rqg);
		    bp->b_flags |= B_ERROR;
		    bp->b_error = ENOMEM;
		    biodone(bp);
		    return REQUEST_ENOMEM;		    /* can't do it */
		}
		*diskaddr += rqe->datalen;		    /* look at the remainder */
		if (*diskaddr < diskend) {		    /* didn't finish the request on this stripe */
		    plex->multiblock++;			    /* count another one */
		    if (sdno == plex->subdisks - 1)	    /* last subdisk, */
			plex->multistripe++;		    /* another stripe as well */
		}
	    }
	}
	break;


    default:
	printf("vinum: invalid plex type in bre");
    }

    return status;
}

/* Build up a request structure for reading volumes.
 * This function is not needed for plex reads, since there's
 * no recovery if a plex read can't be satisfied. */
enum requeststatus 
build_read_request(struct request *rq,			    /* request */
    int plexindex)
{							    /* index in the volume's plex table */
    BROKEN_GDB;
    struct buf *bp;
    daddr_t startaddr;					    /* offset of previous part of transfer */
    daddr_t diskaddr;					    /* offset of current part of transfer */
    daddr_t diskend;					    /* and end offset of transfer */
    int plexno;						    /* plex index in vinum_conf */
    struct volume *vol;					    /* volume in question */
    off_t oldstart;					    /* note where we started */
    int recovered = 0;					    /* set if we recover a read */
    enum requeststatus status = REQUEST_OK;

    bp = rq->bp;					    /* buffer pointer */
    diskaddr = bp->b_blkno;				    /* start offset of transfer */
    diskend = diskaddr + (bp->b_bcount / DEV_BSIZE);	    /* and end offset of transfer */
    vol = &VOL[rq->volplex.volno];			    /* point to volume */

    while (diskaddr < diskend) {			    /* build up request components */
	startaddr = diskaddr;
	status = bre(rq, vol->plex[plexindex], &diskaddr, diskend); /* build up a request */
	switch (status) {
	case REQUEST_OK:
	    continue;

	case REQUEST_RECOVERED:
	    recovered = 1;
	    break;

	case REQUEST_EOF:
	case REQUEST_ENOMEM:
	    return status;

	    /* if we get here, we have either had a failure or
	     * a RAID 5 recovery.  We don't want to use the
	     * recovery, because it's expensive, so first we
	     * check if we have alternatives */
	case REQUEST_DOWN:				    /* can't access the plex */
	    if (vol != NULL) {				    /* and this is volume I/O */
		/* Try to satisfy the request
		 * from another plex */
		for (plexno = 0; plexno < vol->plexes; plexno++) {
		    diskaddr = startaddr;		    /* start at the beginning again */
		    oldstart = startaddr;		    /* and note where that was */
		    if (plexno != plexindex) {		    /* don't try this plex again */
			bre(rq, vol->plex[plexno], &diskaddr, diskend);	/* try a request */
			if (diskaddr > oldstart) {	    /* we satisfied another part */
			    recovered = 1;		    /* we recovered from the problem */
			    status = REQUEST_OK;	    /* don't complain about it */
			    break;
			}
		    }
		    if (plexno == (vol->plexes - 1))	    /* couldn't satisfy the request */
			return REQUEST_DOWN;		    /* failed */
		}
	    } else
		return REQUEST_DOWN;			    /* bad luck */
	}
	if (recovered)
	    vol->recovered_reads += recovered;		    /* adjust our recovery count */
    }
    return status;
}

/* Build up a request structure for writes.
 * Return the best (numerically lowest) requeststatus that bre returns
 * for any of the volume's plexes: REQUEST_OK if at least one plex can
 * take the complete transfer, REQUEST_DOWN if none can. */
enum requeststatus 
build_write_request(struct request *rq)
{							    /* request */
    BROKEN_GDB;
    struct buf *bp;
    daddr_t diskstart;					    /* offset of current part of transfer */
    daddr_t diskend;					    /* and end offset of transfer */
    int plexno;						    /* plex index in vinum_conf */
    struct volume *vol;					    /* volume in question */
    enum requeststatus status;

    bp = rq->bp;					    /* buffer pointer */
    vol = &VOL[rq->volplex.volno];			    /* point to volume */
    diskend = bp->b_blkno + (bp->b_bcount / DEV_BSIZE);	    /* end offset of transfer */
    status = REQUEST_DOWN;				    /* assume the worst; min() below keeps the best plex status */
    for (plexno = 0; plexno < vol->plexes; plexno++) {
	diskstart = bp->b_blkno;			    /* start offset of transfer */
	status = min(status, bre(rq,			    /* build requests for the plex */
		vol->plex[plexno],
		&diskstart,
		diskend));
    }
    return status;
}

/* Fill in the struct buf part of a request element. */
enum requeststatus 
build_rq_buffer(struct rqelement *rqe, struct plex *plex)
{
    BROKEN_GDB;
    struct sd *sd;					    /* point to subdisk */
    struct volume *vol;
    struct buf *bp;
    struct buf *ubp;					    /* user (high level) buffer header */

    vol = &VOL[rqe->rqg->rq->volplex.volno];
    sd = &SD[rqe->sdno];				    /* point to subdisk */
    bp = &rqe->b;
    ubp = rqe->rqg->rq->bp;				    /* pointer to user buffer header */

    /* Initialize the buf struct */
    bzero(&rqe->b, sizeof(struct buf));
    bp->b_proc = ubp->b_proc;				    /* process pointer */
    bp->b_flags = ubp->b_flags & (B_NOCACHE | B_READ | B_ASYNC); /* copy these flags from user bp */
    bp->b_flags |= B_CALL | B_BUSY;			    /* inform us when it's done */
    if (plex->state == plex_reviving)
	bp->b_flags |= B_ORDERED;			    /* keep request order if we're reviving */
    bp->b_iodone = complete_rqe;			    /* by calling us here */
    bp->b_dev = DRIVE[rqe->driveno].dev;		    /* drive device */
    bp->b_blkno = rqe->sdoffset + sd->driveoffset;	    /* start address */
    bp->b_bcount = rqe->buflen << DEV_BSHIFT;		    /* number of bytes to transfer */
    bp->b_resid = bp->b_bcount;				    /* and it's still all waiting */
    bp->b_bufsize = bp->b_bcount;			    /* and buffer size */
    bp->b_vp = DRIVE[rqe->driveno].vp;			    /* drive vnode */
    bp->b_rcred = FSCRED;				    /* we have the file system credentials */
    bp->b_wcred = FSCRED;				    /* we have the file system credentials */

    if (rqe->flags & XFR_MALLOCED) {			    /* this operation requires a malloced buffer */
	bp->b_data = Malloc(bp->b_bcount);		    /* get a buffer to put it in */
	if (bp->b_data == NULL) {			    /* failed */
	    Debugger("XXX");
	    abortrequest(rqe->rqg->rq, ENOMEM);
	    return REQUEST_ENOMEM;			    /* no memory */
	}
    } else
	/* Point directly to user buffer data.  This means
	 * that we don't need to do anything when we have
	 * finished the transfer */
	bp->b_data = ubp->b_data + rqe->useroffset * DEV_BSIZE;
    return REQUEST_OK;
}
/* Abort a request: free resources and complete the
 * user request with the specified error */
int 
abortrequest(struct request *rq, int error)
{
    struct buf *bp = rq->bp;				    /* user buffer */

    bp->b_flags |= B_ERROR;
    bp->b_error = error;
    freerq(rq);						    /* free everything we're doing */
    biodone(bp);
    return error;					    /* and give up */
}

/* Check that our transfer will cover the
 * complete address space of the user request.
 *
 * Return 1 if it can, otherwise 0 */
int 
check_range_covered(struct request *rq)
{
    /* XXX */
    return 1;
}

/* Perform I/O on a subdisk */
void 
sdio(struct buf *bp)
{
    int s;						    /* spl */
    struct sd *sd;
    struct sdbuf *sbp;
    daddr_t endoffset;
    struct drive *drive;

    sd = &SD[SDNO(bp->b_dev)];				    /* point to the subdisk */
    drive = &DRIVE[sd->driveno];

    if (drive->state != drive_up) {			    /* XXX until we get the states fixed */
	set_sd_state(SDNO(bp->b_dev), sd_obsolete, setstate_force);
	bp->b_flags |= B_ERROR;
	bp->b_error = EIO;
	biodone(bp);
	return;
    }
    /* XXX decide which states we will really accept here.  up
     * implies it could be involved with a plex, in which
     * case we don't want to dick with it */
    if ((sd->state != sd_up)
	&& (sd->state != sd_initializing)
	&& (sd->state != sd_reborn)) {			    /* we can't access it */
	bp->b_flags |= B_ERROR;
	bp->b_error = EIO;
	if (bp->b_flags & B_BUSY)			    /* XXX why isn't this always the case? */
	    biodone(bp);
	return;
    }
    /* Get a buffer */
    sbp = (struct sdbuf *) Malloc(sizeof(struct sdbuf));
    if (sbp == NULL) {
	bp->b_flags |= B_ERROR;
	bp->b_error = ENOMEM;
	biodone(bp);
	return;
    }
    bcopy(bp, &sbp->b, sizeof(struct buf));		    /* start with the user's buffer */
    sbp->b.b_flags |= B_CALL;				    /* tell us when it's done */
    sbp->b.b_iodone = sdio_done;			    /* here */
    sbp->b.b_dev = DRIVE[sd->driveno].dev;		    /* device */
    sbp->b.b_vp = DRIVE[sd->driveno].vp;		    /* vnode */
    sbp->b.b_blkno += sd->driveoffset;
    sbp->bp = bp;					    /* note the address of the original header */
    sbp->sdno = sd->sdno;				    /* note for statistics */
    sbp->driveno = sd->driveno;
    endoffset = bp->b_blkno + sbp->b.b_bcount / DEV_BSIZE;  /* final sector offset */
    if (endoffset > sd->sectors) {			    /* beyond the end */
	sbp->b.b_bcount -= (endoffset - sd->sectors) * DEV_BSIZE; /* trim */
	if (sbp->b.b_bcount <= 0) {			    /* nothing to transfer */
	    bp->b_resid = bp->b_bcount;			    /* nothing transferred */
	    /* XXX Grrr.  This doesn't seem to work.  Return
	     * an error after all */
	    bp->b_flags |= B_ERROR;
	    bp->b_error = ENOSPC;
	    biodone(bp);
	    Free(sbp);
	    return;
	}
    }
    if ((sbp->b.b_flags & B_READ) == 0)			    /* write */
	sbp->b.b_vp->v_numoutput++;			    /* one more output going */
#if DEBUG
    if (debug & DEBUG_ADDRESSES)
	printf("  %s dev 0x%x, sd %d, offset 0x%x, devoffset 0x%x, length %ld\n",
	    sbp->b.b_flags & B_READ ? "Read" : "Write",
	    sbp->b.b_dev,
	    sbp->sdno,
	    (u_int) (sbp->b.b_blkno - SD[sbp->sdno].driveoffset),
	    (int) sbp->b.b_blkno,
	    sbp->b.b_bcount);				    /* XXX */
    if (debug & DEBUG_NUMOUTPUT)
	printf("  vinumstart sd %d numoutput %ld\n",
	    sbp->sdno,
	    sbp->b.b_vp->v_numoutput);
#endif
    s = splbio();
    (*bdevsw[major(sbp->b.b_dev)]->d_strategy) (&sbp->b);
    splx(s);
}

/* Simplified version of bounds_check_with_label
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition. Adjust transfer
 * if needed, and signal errors or early completion.
 *
 * Volumes are simpler than disk slices: they only contain
 * one component (though we call them a, b and c to make
 * system utilities happy), and they always take up the
 * complete space of the "partition".
 *
 * I'm still not happy with this: why should the label be
 * protected?  If it weren't so damned difficult to write
 * one in the first place (because it's protected), it wouldn't
 * be a problem.
 */
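/* A truncation example (figures invented for illustration, assuming
 * DEV_BSIZE 512): on a 1024-sector volume, a 4 kB write starting at
 * sector 1020 has size = 8, so b_blkno + size overruns maxsize; the
 * code below trims it to size = 1024 - 1020 = 4 sectors, making
 * b_bcount 4 << DEV_BSHIFT = 2048 bytes. */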
int 
vinum_bounds_check(struct buf *bp, struct volume *vol)
{
    int maxsize = vol->size;				    /* size of the partition (sectors) */
    int size = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; /* size of this request (sectors) */

    /* Would this transfer overwrite the disk label? */
    if (bp->b_blkno <= LABELSECTOR			    /* starts before or at the label */
#if LABELSECTOR != 0
	&& bp->b_blkno + size > LABELSECTOR		    /* and finishes after */
#endif
	&& (!(vol->flags & VF_RAW))			    /* and it's not raw */
	&& major(bp->b_dev) == BDEV_MAJOR		    /* and it's the block device */
	&& (bp->b_flags & B_READ) == 0			    /* and it's a write */
	&& ((vol->flags & (VF_WLABEL | VF_LABELLING)) == 0)) { /* and we're not allowed to write the label */
	bp->b_error = EROFS;				    /* read-only */
	bp->b_flags |= B_ERROR;
	return -1;
    }
    if (size == 0)					    /* no transfer specified, */
	return 0;					    /* treat as EOF */
    /* beyond partition? */
    if (bp->b_blkno < 0					    /* negative start */
	|| bp->b_blkno + size > maxsize) {		    /* or goes beyond the end of the partition */
	/* if exactly at end of disk, return an EOF */
	if (bp->b_blkno == maxsize) {
	    bp->b_resid = bp->b_bcount;
	    return 0;
	}
	/* or truncate if part of it fits */
	size = maxsize - bp->b_blkno;
	if (size <= 0) {				    /* nothing to transfer */
	    bp->b_error = EINVAL;
	    bp->b_flags |= B_ERROR;
	    return -1;
	}
	bp->b_bcount = size << DEV_BSHIFT;
    }
    bp->b_pblkno = bp->b_blkno;
    return 1;
}

/* Allocate a request group and hook
 * it in in the list for rq */
struct rqgroup *
allocrqg(struct request *rq, int elements)
{
    struct rqgroup *rqg;				    /* the one we're going to allocate */
    int size = sizeof(struct rqgroup) + elements * sizeof(struct rqelement);
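    /* The header and its elements come from a single allocation: the
     * struct rqgroup is followed in memory by 'elements' rqelement
     * slots, so rqg->rqe[0] through rqg->rqe[elements - 1] are usable
     * (this assumes rqe[] is declared as a trailing array member of
     * struct rqgroup in request.h). */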

    rqg = (struct rqgroup *) Malloc(size);
    if (rqg != NULL) {					    /* malloc OK, */
	if (rq->rqg)					    /* we already have requests */
	    rq->lrqg->next = rqg;			    /* hang it off the end */
	else						    /* first request */
	    rq->rqg = rqg;				    /* at the start */
	rq->lrqg = rqg;					    /* this one is the last in the list */

	bzero(rqg, size);				    /* no old junk */
	rqg->rq = rq;					    /* point back to the parent request */
	rqg->count = elements;				    /* number of requests in the group */
    } else
	Debugger("XXX");
    return rqg;
}

/* Deallocate a request group out of a chain.  We do
 * this by linear search: the chain is short, this
 * almost never happens, and currently it can only
 * happen to the first member of the chain. */
void 
deallocrqg(struct rqgroup *rqg)
{
    struct rqgroup *rqgc = rqg->rq->rqg;		    /* point to the request chain */

    if (rqg->rq->rqg == rqg)				    /* we're first in line */
	rqg->rq->rqg = rqg->next;			    /* unhook ourselves */
    else {
	while (rqgc->next != rqg)			    /* find the group */
	    rqgc = rqgc->next;
	rqgc->next = rqg->next;
    }
    Free(rqg);						    /* free the group itself */
}

/* Character device interface */
int 
vinumread(dev_t dev, struct uio *uio, int ioflag)
{
    return (physio(vinumstrategy, NULL, dev, 1, minphys, uio));
}

int 
vinumwrite(dev_t dev, struct uio *uio, int ioflag)
{
    return (physio(vinumstrategy, NULL, dev, 0, minphys, uio));
}