/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *      - bitmap marked during normal i/o
 *      - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

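/* Test whether a bios[] entry holds one of the special sentinel values
 * above rather than a pointer to a real bio.
 */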
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
                          sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);

static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct pool_info *pi = data;
        int size = offsetof(struct r1bio, bios[pi->raid_disks]);

        /* allocate a r1bio with room for raid_disks entries in the bios array */
        return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
        kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
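/* With the values above, RESYNC_WINDOW is 32 * 64K = 2MB, i.e. 4096
 * sectors, so NEXT_NORMALIO_DISTANCE keeps new normal IO at least
 * 12288 sectors (three resync windows) beyond the current resync point.
 */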

static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct pool_info *pi = data;
        struct r1bio *r1_bio;
        struct bio *bio;
        int i, j;

        r1_bio = r1bio_pool_alloc(gfp_flags, pi);
        if (!r1_bio)
                return NULL;

        /*
         * Allocate bios : 1 for reading, n-1 for writing
         */
        for (j = pi->raid_disks ; j-- ; ) {
                bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
                if (!bio)
                        goto out_free_bio;
                r1_bio->bios[j] = bio;
        }
        /*
         * Allocate RESYNC_PAGES data pages and attach them to
         * the first bio.
         * If this is a user-requested check/repair, allocate
         * RESYNC_PAGES for each bio.
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
                j = pi->raid_disks;
        else
                j = 1;
        while (j--) {
                bio = r1_bio->bios[j];
                bio->bi_vcnt = RESYNC_PAGES;

                if (bio_alloc_pages(bio, gfp_flags))
                        goto out_free_bio;
        }
        /* If not user-requested, copy the page pointers to all bios */
        if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
                for (i = 0; i < RESYNC_PAGES; i++)
                        for (j = 1; j < pi->raid_disks; j++)
                                r1_bio->bios[j]->bi_io_vec[i].bv_page =
                                        r1_bio->bios[0]->bi_io_vec[i].bv_page;
        }

        r1_bio->master_bio = NULL;

        return r1_bio;

out_free_bio:
        while (++j < pi->raid_disks)
                bio_put(r1_bio->bios[j]);
        r1bio_pool_free(r1_bio, data);
        return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
        struct pool_info *pi = data;
        int i, j;
        struct r1bio *r1bio = __r1_bio;

        for (i = 0; i < RESYNC_PAGES; i++)
                for (j = pi->raid_disks; j--;) {
                        if (j == 0 ||
                            r1bio->bios[j]->bi_io_vec[i].bv_page !=
                            r1bio->bios[0]->bi_io_vec[i].bv_page)
                                safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
                }
        for (i = 0; i < pi->raid_disks; i++)
                bio_put(r1bio->bios[i]);

        r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
        int i;

        for (i = 0; i < conf->raid_disks * 2; i++) {
                struct bio **bio = r1_bio->bios + i;
                if (!BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
}

static void free_r1bio(struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;

        put_all_bios(conf, r1_bio);
        mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;
        int i;

        for (i = 0; i < conf->raid_disks * 2; i++) {
                struct bio *bio = r1_bio->bios[i];
                if (bio->bi_end_io)
                        rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
        }

        mempool_free(r1_bio, conf->r1buf_pool);

        lower_barrier(conf);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
        unsigned long flags;
        struct mddev *mddev = r1_bio->mddev;
        struct r1conf *conf = mddev->private;

        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&r1_bio->retry_list, &conf->retry_list);
        conf->nr_queued++;
        spin_unlock_irqrestore(&conf->device_lock, flags);

        wake_up(&conf->wait_barrier);
        md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
        struct bio *bio = r1_bio->master_bio;
        int done;
        struct r1conf *conf = r1_bio->mddev->private;
        sector_t start_next_window = r1_bio->start_next_window;
        sector_t bi_sector = bio->bi_sector;

        if (bio->bi_phys_segments) {
                unsigned long flags;
                spin_lock_irqsave(&conf->device_lock, flags);
                bio->bi_phys_segments--;
                done = (bio->bi_phys_segments == 0);
                spin_unlock_irqrestore(&conf->device_lock, flags);
                /*
                 * make_request() might be waiting for
                 * bi_phys_segments to decrease
                 */
                wake_up(&conf->wait_barrier);
        } else
                done = 1;

        if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        if (done) {
                bio_endio(bio, 0);
                /*
                 * Wake up any possible resync thread that waits for the device
                 * to go idle.
                 */
                allow_barrier(conf, start_next_window, bi_sector);
        }
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
        struct bio *bio = r1_bio->master_bio;

        /* if nobody has done the final endio yet, do it now */
        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
                pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
                         (unsigned long long) bio->bi_sector,
                         (unsigned long long) bio->bi_sector +
                         bio_sectors(bio) - 1);

                call_bio_endio(r1_bio);
        }
        free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;

        conf->mirrors[disk].head_position =
                r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
        int mirror;
        struct r1conf *conf = r1_bio->mddev->private;
        int raid_disks = conf->raid_disks;

        for (mirror = 0; mirror < raid_disks * 2; mirror++)
                if (r1_bio->bios[mirror] == bio)
                        break;

        BUG_ON(mirror == raid_disks * 2);
        update_head_pos(mirror, r1_bio);

        return mirror;
}

static void raid1_end_read_request(struct bio *bio, int error)
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct r1bio *r1_bio = bio->bi_private;
        int mirror;
        struct r1conf *conf = r1_bio->mddev->private;

        mirror = r1_bio->read_disk;
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        update_head_pos(mirror, r1_bio);

        if (uptodate)
                set_bit(R1BIO_Uptodate, &r1_bio->state);
        else {
                /* If all other devices have failed, we want to return
                 * the error upwards rather than fail the last device.
                 * Here we redefine "uptodate" to mean "Don't want to retry"
                 */
                unsigned long flags;
                spin_lock_irqsave(&conf->device_lock, flags);
                if (r1_bio->mddev->degraded == conf->raid_disks ||
                    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
                     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
                        uptodate = 1;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }

        if (uptodate) {
                raid_end_bio_io(r1_bio);
                rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
        } else {
                /*
                 * oops, read error:
                 */
                char b[BDEVNAME_SIZE];
                printk_ratelimited(
                        KERN_ERR "md/raid1:%s: %s: "
                        "rescheduling sector %llu\n",
                        mdname(conf->mddev),
                        bdevname(conf->mirrors[mirror].rdev->bdev,
                                 b),
                        (unsigned long long)r1_bio->sector);
                set_bit(R1BIO_ReadError, &r1_bio->state);
                reschedule_retry(r1_bio);
                /* don't drop the reference on read_disk yet */
        }
}

static void close_write(struct r1bio *r1_bio)
{
        /* it really is the end of this request */
        if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
                /* free extra copy of the data pages */
                int i = r1_bio->behind_page_count;
                while (i--)
                        safe_put_page(r1_bio->behind_bvecs[i].bv_page);
                kfree(r1_bio->behind_bvecs);
                r1_bio->behind_bvecs = NULL;
        }
        /* clear the bitmap if all writes complete successfully */
        bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
                        r1_bio->sectors,
                        !test_bit(R1BIO_Degraded, &r1_bio->state),
                        test_bit(R1BIO_BehindIO, &r1_bio->state));
        md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
        if (!atomic_dec_and_test(&r1_bio->remaining))
                return;

        if (test_bit(R1BIO_WriteError, &r1_bio->state))
                reschedule_retry(r1_bio);
        else {
                close_write(r1_bio);
                if (test_bit(R1BIO_MadeGood, &r1_bio->state))
                        reschedule_retry(r1_bio);
                else
                        raid_end_bio_io(r1_bio);
        }
}

static void raid1_end_write_request(struct bio *bio, int error)
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct r1bio *r1_bio = bio->bi_private;
        int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
        struct r1conf *conf = r1_bio->mddev->private;
        struct bio *to_put = NULL;

        mirror = find_bio_disk(r1_bio, bio);

        /*
         * 'one mirror IO has finished' event handler:
         */
        if (!uptodate) {
                set_bit(WriteErrorSeen,
                        &conf->mirrors[mirror].rdev->flags);
                if (!test_and_set_bit(WantReplacement,
                                      &conf->mirrors[mirror].rdev->flags))
                        set_bit(MD_RECOVERY_NEEDED, &
                                conf->mddev->recovery);

                set_bit(R1BIO_WriteError, &r1_bio->state);
        } else {
                /*
                 * Set R1BIO_Uptodate in our master bio, so that we
                 * will return a good error code to the higher
                 * levels even if IO on some other mirrored buffer
                 * fails.
                 *
                 * The 'master' represents the composite IO operation
                 * to user-side. So if something waits for IO, then it
                 * will wait for the 'master' bio.
                 */
                sector_t first_bad;
                int bad_sectors;

                r1_bio->bios[mirror] = NULL;
                to_put = bio;
                /*
                 * Do not set R1BIO_Uptodate if the current device is
                 * rebuilding or Faulty. This is because we cannot use
                 * such device for properly reading the data back (we could
                 * potentially use it, if the current write would have fallen
                 * before rdev->recovery_offset, but for simplicity we don't
                 * check this here.)
                 */
                if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
                    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
                        set_bit(R1BIO_Uptodate, &r1_bio->state);

                /* Maybe we can clear some bad blocks. */
                if (is_badblock(conf->mirrors[mirror].rdev,
                                r1_bio->sector, r1_bio->sectors,
                                &first_bad, &bad_sectors)) {
                        r1_bio->bios[mirror] = IO_MADE_GOOD;
                        set_bit(R1BIO_MadeGood, &r1_bio->state);
                }
        }

        if (behind) {
                if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
                        atomic_dec(&r1_bio->behind_remaining);

                /*
                 * In behind mode, we ACK the master bio once the I/O
                 * has safely reached all non-writemostly
                 * disks. Setting the Returned bit ensures that this
                 * gets done only once -- we don't ever want to return
                 * -EIO here, instead we'll wait
                 */
                if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
                    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
                        /* Maybe we can return now */
                        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
                                struct bio *mbio = r1_bio->master_bio;
                                pr_debug("raid1: behind end write sectors"
                                         " %llu-%llu\n",
                                         (unsigned long long) mbio->bi_sector,
                                         (unsigned long long) mbio->bi_sector +
                                         bio_sectors(mbio) - 1);
                                call_bio_endio(r1_bio);
                        }
                }
        }
        if (r1_bio->bios[mirror] == NULL)
                rdev_dec_pending(conf->mirrors[mirror].rdev,
                                 conf->mddev);

        /*
         * Let's see if all mirrored write operations have finished
         * already.
         */
        r1_bio_write_done(r1_bio);

        if (to_put)
                bio_put(to_put);
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
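/* In outline, the loop below prefers: a disk already doing sequential
 * IO at this sector (or with zero head distance), then a completely
 * idle disk, then - when a non-rotational disk is present - the disk
 * with the fewest pending requests, and finally the disk whose head
 * position is closest.  Write-mostly disks are only used as a last
 * resort.
 */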
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
        const sector_t this_sector = r1_bio->sector;
        int sectors;
        int best_good_sectors;
        int best_disk, best_dist_disk, best_pending_disk;
        int has_nonrot_disk;
        int disk;
        sector_t best_dist;
        unsigned int min_pending;
        struct md_rdev *rdev;
        int choose_first;
        int choose_next_idle;

        rcu_read_lock();
        /*
         * Check if we can balance. We can balance on the whole
         * device if no resync is going on, or below the resync window.
         * We take the first readable disk when above the resync window.
         */
 retry:
        sectors = r1_bio->sectors;
        best_disk = -1;
        best_dist_disk = -1;
        best_dist = MaxSector;
        best_pending_disk = -1;
        min_pending = UINT_MAX;
        best_good_sectors = 0;
        has_nonrot_disk = 0;
        choose_next_idle = 0;

        if (conf->mddev->recovery_cp < MaxSector &&
            (this_sector + sectors >= conf->next_resync))
                choose_first = 1;
        else
                choose_first = 0;

        for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                sector_t dist;
                sector_t first_bad;
                int bad_sectors;
                unsigned int pending;
                bool nonrot;

                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
                    || rdev == NULL
                    || test_bit(Unmerged, &rdev->flags)
                    || test_bit(Faulty, &rdev->flags))
                        continue;
                if (!test_bit(In_sync, &rdev->flags) &&
                    rdev->recovery_offset < this_sector + sectors)
                        continue;
                if (test_bit(WriteMostly, &rdev->flags)) {
                        /* Don't balance among write-mostly, just
                         * use the first as a last resort */
                        if (best_disk < 0) {
                                if (is_badblock(rdev, this_sector, sectors,
                                                &first_bad, &bad_sectors)) {
                                        if (first_bad < this_sector)
                                                /* Cannot use this */
                                                continue;
                                        best_good_sectors = first_bad - this_sector;
                                } else
                                        best_good_sectors = sectors;
                                best_disk = disk;
                        }
                        continue;
                }
                /* This is a reasonable device to use.  It might
                 * even be best.
                 */
                if (is_badblock(rdev, this_sector, sectors,
                                &first_bad, &bad_sectors)) {
                        if (best_dist < MaxSector)
                                /* already have a better device */
                                continue;
                        if (first_bad <= this_sector) {
                                /* cannot read here. If this is the 'primary'
                                 * device, then we must not read beyond
                                 * bad_sectors from another device..
                                 */
                                bad_sectors -= (this_sector - first_bad);
                                if (choose_first && sectors > bad_sectors)
                                        sectors = bad_sectors;
                                if (best_good_sectors > sectors)
                                        best_good_sectors = sectors;

                        } else {
                                sector_t good_sectors = first_bad - this_sector;
                                if (good_sectors > best_good_sectors) {
                                        best_good_sectors = good_sectors;
                                        best_disk = disk;
                                }
                                if (choose_first)
                                        break;
                        }
                        continue;
                } else
                        best_good_sectors = sectors;

                nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
                has_nonrot_disk |= nonrot;
                pending = atomic_read(&rdev->nr_pending);
                dist = abs(this_sector - conf->mirrors[disk].head_position);
                if (choose_first) {
                        best_disk = disk;
                        break;
                }
                /* Don't change to another disk for sequential reads */
                if (conf->mirrors[disk].next_seq_sect == this_sector
                    || dist == 0) {
                        int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
                        struct raid1_info *mirror = &conf->mirrors[disk];

                        best_disk = disk;
                        /*
                         * If buffered sequential IO size exceeds optimal
                         * iosize, check if there is an idle disk. If yes,
                         * choose the idle disk. read_balance could already
                         * have chosen an idle disk before noticing this is
                         * a sequential IO on this disk. This doesn't matter
                         * because this disk will idle, and next time it will
                         * be utilized once the first disk's IO size exceeds
                         * the optimal iosize. In this way, the iosize of the
                         * first disk will be at least the optimal iosize.
                         * The iosize of the second disk might be small, but
                         * that is not a big deal since when the second disk
                         * starts IO, the first disk is likely still busy.
                         */
                        if (nonrot && opt_iosize > 0 &&
                            mirror->seq_start != MaxSector &&
                            mirror->next_seq_sect > opt_iosize &&
                            mirror->next_seq_sect - opt_iosize >=
                            mirror->seq_start) {
                                choose_next_idle = 1;
                                continue;
                        }
                        break;
                }
                /* If device is idle, use it */
                if (pending == 0) {
                        best_disk = disk;
                        break;
                }

                if (choose_next_idle)
                        continue;

                if (min_pending > pending) {
                        min_pending = pending;
                        best_pending_disk = disk;
                }

                if (dist < best_dist) {
                        best_dist = dist;
                        best_dist_disk = disk;
                }
        }

        /*
         * If all disks are rotational, choose the closest disk. If any disk is
         * non-rotational, choose the disk with fewer pending requests even if
         * that disk is rotational, which might or might not be optimal for
         * arrays with mixed rotational/non-rotational disks, depending on
         * workload.
         */
        if (best_disk == -1) {
                if (has_nonrot_disk)
                        best_disk = best_pending_disk;
                else
                        best_disk = best_dist_disk;
        }

        if (best_disk >= 0) {
                rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
                if (!rdev)
                        goto retry;
                atomic_inc(&rdev->nr_pending);
                if (test_bit(Faulty, &rdev->flags)) {
                        /* cannot risk returning a device that failed
                         * before we inc'ed nr_pending
                         */
                        rdev_dec_pending(rdev, conf->mddev);
                        goto retry;
                }
                sectors = best_good_sectors;

                if (conf->mirrors[best_disk].next_seq_sect != this_sector)
                        conf->mirrors[best_disk].seq_start = this_sector;

                conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
        }
        rcu_read_unlock();
        *max_sectors = sectors;

        return best_disk;
}

static int raid1_mergeable_bvec(struct request_queue *q,
                                struct bvec_merge_data *bvm,
                                struct bio_vec *biovec)
{
        struct mddev *mddev = q->queuedata;
        struct r1conf *conf = mddev->private;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max = biovec->bv_len;

        if (mddev->merge_check_needed) {
                int disk;
                rcu_read_lock();
                for (disk = 0; disk < conf->raid_disks * 2; disk++) {
                        struct md_rdev *rdev = rcu_dereference(
                                conf->mirrors[disk].rdev);
                        if (rdev && !test_bit(Faulty, &rdev->flags)) {
                                struct request_queue *q =
                                        bdev_get_queue(rdev->bdev);
                                if (q->merge_bvec_fn) {
                                        bvm->bi_sector = sector +
                                                rdev->data_offset;
                                        bvm->bi_bdev = rdev->bdev;
                                        max = min(max, q->merge_bvec_fn(
                                                          q, bvm, biovec));
                                }
                        }
                }
                rcu_read_unlock();
        }
        return max;

}

int md_raid1_congested(struct mddev *mddev, int bits)
{
        struct r1conf *conf = mddev->private;
        int i, ret = 0;

        if ((bits & (1 << BDI_async_congested)) &&
            conf->pending_count >= max_queued_requests)
                return 1;

        rcu_read_lock();
        for (i = 0; i < conf->raid_disks * 2; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);

                        BUG_ON(!q);

                        /* Note the '|| 1' - when read_balance prefers
                         * non-congested targets, it can be removed
                         */
                        if ((bits & (1<<BDI_async_congested)) || 1)
                                ret |= bdi_congested(&q->backing_dev_info, bits);
                        else
                                ret &= bdi_congested(&q->backing_dev_info, bits);
                }
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(md_raid1_congested);

static int raid1_congested(void *data, int bits)
{
        struct mddev *mddev = data;

        return mddev_congested(mddev, bits) ||
                md_raid1_congested(mddev, bits);
}

static void flush_pending_writes(struct r1conf *conf)
{
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
         */
        spin_lock_irq(&conf->device_lock);

        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
                conf->pending_count = 0;
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to
                 * disk before proceeding w/ I/O */
                bitmap_unplug(conf->mddev->bitmap);
                wake_up(&conf->wait_barrier);

                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;
                        bio->bi_next = NULL;
                        if (unlikely((bio->bi_rw & REQ_DISCARD) &&
                            !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                                /* Just ignore it */
                                bio_endio(bio, 0);
                        else
                                generic_make_request(bio);
                        bio = next;
                }
        } else
                spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
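/* A minimal sketch of the pairing described above:
 *
 *     regular IO:      wait_barrier(conf, bio);  ... allow_barrier(conf, ...);
 *     background IO:   raise_barrier(conf);      ... lower_barrier(conf);
 */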
static void raise_barrier(struct r1conf *conf)
{
        spin_lock_irq(&conf->resync_lock);

        /* Wait until no block IO is waiting */
        wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
                            conf->resync_lock);

        /* block any new IO from starting */
        conf->barrier++;

        /* For these conditions we must wait:
         * A: while the array is in a frozen state
         * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
         *    the maximum allowed depth.
         * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
         *    the next resync would reach into the window which normal bios
         *    are handling.
         */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->array_frozen &&
                            conf->barrier < RESYNC_DEPTH &&
                            (conf->start_next_window >=
                             conf->next_resync + RESYNC_SECTORS),
                            conf->resync_lock);

        spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf)
{
        unsigned long flags;
        BUG_ON(conf->barrier <= 0);
        spin_lock_irqsave(&conf->resync_lock, flags);
        conf->barrier--;
        spin_unlock_irqrestore(&conf->resync_lock, flags);
        wake_up(&conf->wait_barrier);
}

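/* Decide whether a bio must wait at the barrier: everything waits while
 * the array is frozen; while a barrier is up, a write only needs to wait
 * if it overlaps the region from (next_resync - RESYNC_WINDOW_SECTORS)
 * up to (next_resync + NEXT_NORMALIO_DISTANCE).
 */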
static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
{
        bool wait = false;

        if (conf->array_frozen || !bio)
                wait = true;
        else if (conf->barrier && bio_data_dir(bio) == WRITE) {
                if (conf->next_resync < RESYNC_WINDOW_SECTORS)
                        wait = true;
                else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
                                >= bio_end_sector(bio)) ||
                         (conf->next_resync + NEXT_NORMALIO_DISTANCE
                                <= bio->bi_sector))
                        wait = false;
                else
                        wait = true;
        }

        return wait;
}

static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
{
        sector_t sector = 0;

        spin_lock_irq(&conf->resync_lock);
        if (need_to_wait_for_sync(conf, bio)) {
                conf->nr_waiting++;
                /* Wait for the barrier to drop.
                 * However if there are already pending
                 * requests (preventing the barrier from
                 * rising completely), and the
                 * pre-process bio queue isn't empty,
                 * then don't wait, as we need to empty
                 * that queue to get the nr_pending
                 * count down.
                 */
                wait_event_lock_irq(conf->wait_barrier,
                                    !conf->array_frozen &&
                                    (!conf->barrier ||
                                    ((conf->start_next_window <
                                      conf->next_resync + RESYNC_SECTORS) &&
                                     current->bio_list &&
                                     !bio_list_empty(current->bio_list))),
                                    conf->resync_lock);
                conf->nr_waiting--;
        }

        if (bio && bio_data_dir(bio) == WRITE) {
                if (conf->next_resync + NEXT_NORMALIO_DISTANCE
                    <= bio->bi_sector) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
                                        NEXT_NORMALIO_DISTANCE;

                        if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
                            <= bio->bi_sector)
                                conf->next_window_requests++;
                        else
                                conf->current_window_requests++;
                }
                if (bio->bi_sector >= conf->start_next_window)
                        sector = conf->start_next_window;
        }

        conf->nr_pending++;
        spin_unlock_irq(&conf->resync_lock);
        return sector;
}

static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
                          sector_t bi_sector)
{
        unsigned long flags;

        spin_lock_irqsave(&conf->resync_lock, flags);
        conf->nr_pending--;
        if (start_next_window) {
                if (start_next_window == conf->start_next_window) {
                        if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
                            <= bi_sector)
                                conf->next_window_requests--;
                        else
                                conf->current_window_requests--;
                } else
                        conf->current_window_requests--;

                if (!conf->current_window_requests) {
                        if (conf->next_window_requests) {
                                conf->current_window_requests =
                                        conf->next_window_requests;
                                conf->next_window_requests = 0;
                                conf->start_next_window +=
                                        NEXT_NORMALIO_DISTANCE;
                        } else
                                conf->start_next_window = MaxSector;
                }
        }
        spin_unlock_irqrestore(&conf->resync_lock, flags);
        wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r1conf *conf, int extra)
{
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We wait until nr_pending matches nr_queued+extra.
         * This is called in the context of one normal IO request
         * that has failed. Thus any sync request that might be pending
         * will be blocked by nr_pending, and we need to wait for
         * pending IO requests to complete or be queued for re-try.
         * Thus the number queued (nr_queued) plus this request (extra)
         * must match the number of pending IOs (nr_pending) before
         * we continue.
         */
        spin_lock_irq(&conf->resync_lock);
        conf->array_frozen = 1;
        wait_event_lock_irq_cmd(conf->wait_barrier,
                                conf->nr_pending == conf->nr_queued+extra,
                                conf->resync_lock,
                                flush_pending_writes(conf));
        spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
{
        /* reverse the effect of the freeze */
        spin_lock_irq(&conf->resync_lock);
        conf->array_frozen = 0;
        wake_up(&conf->wait_barrier);
        spin_unlock_irq(&conf->resync_lock);
}
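
/* Callers are expected to bracket work that needs a quiescent array,
 * for example:
 *
 *     freeze_array(conf, 1);
 *     ... handle the failed request / manipulate the array ...
 *     unfreeze_array(conf);
 */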


/* duplicate the data pages for behind I/O */
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
        int i;
        struct bio_vec *bvec;
        struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
                                        GFP_NOIO);
        if (unlikely(!bvecs))
                return;

        bio_for_each_segment_all(bvec, bio, i) {
                bvecs[i] = *bvec;
                bvecs[i].bv_page = alloc_page(GFP_NOIO);
                if (unlikely(!bvecs[i].bv_page))
                        goto do_sync_io;
                memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
                       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
                kunmap(bvecs[i].bv_page);
                kunmap(bvec->bv_page);
        }
        r1_bio->behind_bvecs = bvecs;
        r1_bio->behind_page_count = bio->bi_vcnt;
        set_bit(R1BIO_BehindIO, &r1_bio->state);
        return;

do_sync_io:
        for (i = 0; i < bio->bi_vcnt; i++)
                if (bvecs[i].bv_page)
                        put_page(bvecs[i].bv_page);
        kfree(bvecs);
        pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}

struct raid1_plug_cb {
        struct blk_plug_cb      cb;
        struct bio_list         pending;
        int                     pending_cnt;
};

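/* Flush callback for the plugging above: queued writes are either
 * handed off to the raid1d thread (when unplugged from a scheduling
 * context) or submitted directly.
 */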
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
                                                  cb);
        struct mddev *mddev = plug->cb.data;
        struct r1conf *conf = mddev->private;
        struct bio *bio;

        if (from_schedule || current->bio_list) {
                spin_lock_irq(&conf->device_lock);
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
                spin_unlock_irq(&conf->device_lock);
                wake_up(&conf->wait_barrier);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
        }

        /* we aren't scheduling, so we can do the write-out directly. */
        bio = bio_list_get(&plug->pending);
        bitmap_unplug(mddev->bitmap);
        wake_up(&conf->wait_barrier);

        while (bio) { /* submit pending writes */
                struct bio *next = bio->bi_next;
                bio->bi_next = NULL;
                if (unlikely((bio->bi_rw & REQ_DISCARD) &&
                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                        /* Just ignore it */
                        bio_endio(bio, 0);
                else
                        generic_make_request(bio);
                bio = next;
        }
        kfree(plug);
}

static void make_request(struct mddev *mddev, struct bio *bio)
{
        struct r1conf *conf = mddev->private;
        struct raid1_info *mirror;
        struct r1bio *r1_bio;
        struct bio *read_bio;
        int i, disks;
        struct bitmap *bitmap;
        unsigned long flags;
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
        const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid1_plug_cb *plug = NULL;
        int first_clone;
        int sectors_handled;
        int max_sectors;
        sector_t start_next_window;

        /*
         * Register the new request and wait if the reconstruction
         * thread has put up a bar for new requests.
         * Continue immediately if no resync is active currently.
         */

        md_write_start(mddev, bio); /* wait on superblock update early */

        if (bio_data_dir(bio) == WRITE &&
            bio_end_sector(bio) > mddev->suspend_lo &&
            bio->bi_sector < mddev->suspend_hi) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
                 * wait.
                 */
                DEFINE_WAIT(w);
                for (;;) {
                        flush_signals(current);
                        prepare_to_wait(&conf->wait_barrier,
                                        &w, TASK_INTERRUPTIBLE);
                        if (bio_end_sector(bio) <= mddev->suspend_lo ||
                            bio->bi_sector >= mddev->suspend_hi)
                                break;
                        schedule();
                }
                finish_wait(&conf->wait_barrier, &w);
        }

        start_next_window = wait_barrier(conf, bio);

        bitmap = mddev->bitmap;

        /*
         * make_request() can abort the operation when READA is being
         * used and no empty request is available.
         */
        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

        r1_bio->master_bio = bio;
        r1_bio->sectors = bio_sectors(bio);
        r1_bio->state = 0;
        r1_bio->mddev = mddev;
        r1_bio->sector = bio->bi_sector;

        /* We might need to issue multiple reads to different
         * devices if there are bad blocks around, so we keep
         * track of the number of reads in bio->bi_phys_segments.
         * If this is 0, there is only one r1_bio and no locking
         * will be needed when requests complete.  If it is
         * non-zero, then it is the number of not-completed requests.
         */
        bio->bi_phys_segments = 0;
        clear_bit(BIO_SEG_VALID, &bio->bi_flags);

        if (rw == READ) {
                /*
                 * read balancing logic:
                 */
                int rdisk;

read_again:
                rdisk = read_balance(conf, r1_bio, &max_sectors);

                if (rdisk < 0) {
                        /* couldn't find anywhere to read from */
                        raid_end_bio_io(r1_bio);
                        return;
                }
                mirror = conf->mirrors + rdisk;

                if (test_bit(WriteMostly, &mirror->rdev->flags) &&
                    bitmap) {
                        /* Reading from a write-mostly device must
                         * take care not to over-take any writes
                         * that are 'behind'
                         */
                        wait_event(bitmap->behind_wait,
                                   atomic_read(&bitmap->behind_writes) == 0);
                }
                r1_bio->read_disk = rdisk;

                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
                         max_sectors);

                r1_bio->bios[rdisk] = read_bio;

                read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid1_end_read_request;
                read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r1_bio;

                if (max_sectors < r1_bio->sectors) {
                        /* could not read all from this device, so we will
                         * need another r1_bio.
                         */

                        sectors_handled = (r1_bio->sector + max_sectors
                                           - bio->bi_sector);
                        r1_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (bio->bi_phys_segments == 0)
                                bio->bi_phys_segments = 2;
                        else
                                bio->bi_phys_segments++;
                        spin_unlock_irq(&conf->device_lock);
                        /* Cannot call generic_make_request directly
                         * as that will be queued in __make_request
                         * and subsequent mempool_alloc might block waiting
                         * for it.  So hand bio over to raid1d.
                         */
                        reschedule_retry(r1_bio);

                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

                        r1_bio->master_bio = bio;
                        r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r1_bio->state = 0;
                        r1_bio->mddev = mddev;
                        r1_bio->sector = bio->bi_sector + sectors_handled;
                        goto read_again;
                } else
                        generic_make_request(read_bio);
                return;
        }

        /*
         * WRITE:
         */
        if (conf->pending_count >= max_queued_requests) {
                md_wakeup_thread(mddev->thread);
                wait_event(conf->wait_barrier,
                           conf->pending_count < max_queued_requests);
        }
        /* first select target devices under rcu_lock and
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
         * If there are known/acknowledged bad blocks on any device on
         * which we have seen a write error, we want to avoid writing those
         * blocks.
         * This potentially requires several writes to write around
         * the bad blocks.  Each set of writes gets its own r1bio
1245          * with a set of bios attached.
1246          */
1247
1248         disks = conf->raid_disks * 2;
1249  retry_write:
1250         r1_bio->start_next_window = start_next_window;
1251         blocked_rdev = NULL;
1252         rcu_read_lock();
1253         max_sectors = r1_bio->sectors;
1254         for (i = 0;  i < disks; i++) {
1255                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1256                 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1257                         atomic_inc(&rdev->nr_pending);
1258                         blocked_rdev = rdev;
1259                         break;
1260                 }
1261                 r1_bio->bios[i] = NULL;
1262                 if (!rdev || test_bit(Faulty, &rdev->flags)
1263                     || test_bit(Unmerged, &rdev->flags)) {
1264                         if (i < conf->raid_disks)
1265                                 set_bit(R1BIO_Degraded, &r1_bio->state);
1266                         continue;
1267                 }
1268
1269                 atomic_inc(&rdev->nr_pending);
1270                 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1271                         sector_t first_bad;
1272                         int bad_sectors;
1273                         int is_bad;
1274
1275                         is_bad = is_badblock(rdev, r1_bio->sector,
1276                                              max_sectors,
1277                                              &first_bad, &bad_sectors);
1278                         if (is_bad < 0) {
1279                                 /* mustn't write here until the bad block is
1280                                  * acknowledged*/
1281                                 set_bit(BlockedBadBlocks, &rdev->flags);
1282                                 blocked_rdev = rdev;
1283                                 break;
1284                         }
1285                         if (is_bad && first_bad <= r1_bio->sector) {
1286                                 /* Cannot write here at all */
1287                                 bad_sectors -= (r1_bio->sector - first_bad);
1288                                 if (bad_sectors < max_sectors)
1289                                         /* mustn't write more than bad_sectors
1290                                          * to other devices yet
1291                                          */
1292                                         max_sectors = bad_sectors;
1293                                 rdev_dec_pending(rdev, mddev);
1294                                 /* We don't set R1BIO_Degraded as that
1295                                  * only applies if the disk is
1296                                  * missing, so it might be re-added,
1297                                  * and we want to know to recover this
1298                                  * chunk.
1299                                  * In this case the device is here,
1300                                  * and the fact that this chunk is not
1301                                  * in-sync is recorded in the bad
1302                                  * block log
1303                                  */
1304                                 continue;
1305                         }
1306                         if (is_bad) {
1307                                 int good_sectors = first_bad - r1_bio->sector;
1308                                 if (good_sectors < max_sectors)
1309                                         max_sectors = good_sectors;
1310                         }
1311                 }
1312                 r1_bio->bios[i] = bio;
1313         }
1314         rcu_read_unlock();
1315
1316         if (unlikely(blocked_rdev)) {
1317                 /* Wait for this device to become unblocked */
1318                 int j;
1319                 sector_t old = start_next_window;
1320
1321                 for (j = 0; j < i; j++)
1322                         if (r1_bio->bios[j])
1323                                 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1324                 r1_bio->state = 0;
1325                 allow_barrier(conf, start_next_window, bio->bi_sector);
1326                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1327                 start_next_window = wait_barrier(conf, bio);
1328                 /*
1329                  * We must make sure the multiple r1bios split from this
1330                  * bio all see the same value of bi_phys_segments.
1331                  */
1332                 if (bio->bi_phys_segments && old &&
1333                     old != start_next_window)
1334                         /* Wait for the former r1bio(s) to complete */
1335                         wait_event(conf->wait_barrier,
1336                                    bio->bi_phys_segments == 1);
1337                 goto retry_write;
1338         }
1339
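             /*
              * When a write must be split, bi_phys_segments of the master
              * bio is reused as a count of the r1_bios outstanding against
              * it; it is dropped again as each r1_bio completes.
              */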
1340         if (max_sectors < r1_bio->sectors) {
1341                 /* We are splitting this write into multiple parts, so
1342                  * we need to prepare for allocating another r1_bio.
1343                  */
1344                 r1_bio->sectors = max_sectors;
1345                 spin_lock_irq(&conf->device_lock);
1346                 if (bio->bi_phys_segments == 0)
1347                         bio->bi_phys_segments = 2;
1348                 else
1349                         bio->bi_phys_segments++;
1350                 spin_unlock_irq(&conf->device_lock);
1351         }
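             /* sectors of the master bio that will have been handled once
              * this r1_bio (max_sectors starting at r1_bio->sector)
              * completes */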
1352         sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
1353
1354         atomic_set(&r1_bio->remaining, 1);
1355         atomic_set(&r1_bio->behind_remaining, 0);
1356
1357         first_clone = 1;
1358         for (i = 0; i < disks; i++) {
1359                 struct bio *mbio;
1360                 if (!r1_bio->bios[i])
1361                         continue;
1362
1363                 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1364                 bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
1365
1366                 if (first_clone) {
1367                         /* Do behind I/O?
1368                          * Not if there are too many outstanding, or we
1369                          * cannot allocate memory, or a reader on WriteMostly
1370                          * is waiting for behind writes to flush. */
1371                         if (bitmap &&
1372                             (atomic_read(&bitmap->behind_writes)
1373                              < mddev->bitmap_info.max_write_behind) &&
1374                             !waitqueue_active(&bitmap->behind_wait))
1375                                 alloc_behind_pages(mbio, r1_bio);
1376
1377                         bitmap_startwrite(bitmap, r1_bio->sector,
1378                                           r1_bio->sectors,
1379                                           test_bit(R1BIO_BehindIO,
1380                                                    &r1_bio->state));
1381                         first_clone = 0;
1382                 }
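                     /*
                      * Write-behind: alloc_behind_pages() copied the data
                      * above, so the write can be reported complete before
                      * the WriteMostly devices finish; behind_remaining
                      * counts those still-pending writes.
                      */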
1383                 if (r1_bio->behind_bvecs) {
1384                         struct bio_vec *bvec;
1385                         int j;
1386
1387                         /*
1388                          * We trimmed the bio, so _all is legit
1389                          */
1390                         bio_for_each_segment_all(bvec, mbio, j)
1391                                 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1392                         if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1393                                 atomic_inc(&r1_bio->behind_remaining);
1394                 }
1395
1396                 r1_bio->bios[i] = mbio;
1397
1398                 mbio->bi_sector = (r1_bio->sector +
1399                                    conf->mirrors[i].rdev->data_offset);
1400                 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1401                 mbio->bi_end_io = raid1_end_write_request;
1402                 mbio->bi_rw =
1403                         WRITE | do_flush_fua | do_sync | do_discard | do_same;
1404                 mbio->bi_private = r1_bio;
1405
1406                 atomic_inc(&r1_bio->remaining);
1407
1408                 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1409                 if (cb)
1410                         plug = container_of(cb, struct raid1_plug_cb, cb);
1411                 else
1412                         plug = NULL;
1413                 spin_lock_irqsave(&conf->device_lock, flags);
1414                 if (plug) {
1415                         bio_list_add(&plug->pending, mbio);
1416                         plug->pending_cnt++;
1417                 } else {
1418                         bio_list_add(&conf->pending_bio_list, mbio);
1419                         conf->pending_count++;
1420                 }
1421                 spin_unlock_irqrestore(&conf->device_lock, flags);
1422                 if (!plug)
1423                         md_wakeup_thread(mddev->thread);
1424         }
1425         /* Mustn't call r1_bio_write_done before this next test,
1426          * as it could result in the bio being freed.
1427          */
1428         if (sectors_handled < bio_sectors(bio)) {
1429                 r1_bio_write_done(r1_bio);
1430                 /* We need another r1_bio.  It has already been counted
1431                  * in bio->bi_phys_segments
1432                  */
1433                 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1434                 r1_bio->master_bio = bio;
1435                 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1436                 r1_bio->state = 0;
1437                 r1_bio->mddev = mddev;
1438                 r1_bio->sector = bio->bi_sector + sectors_handled;
1439                 goto retry_write;
1440         }
1441
1442         r1_bio_write_done(r1_bio);
1443
1444         /* In case raid1d snuck in to freeze_array */
1445         wake_up(&conf->wait_barrier);
1446 }
1447
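/*
 * Contribute the raid1 part of /proc/mdstat, e.g. " [2/2] [UU]":
 * total and working disk counts, then one flag per disk
 * ('U' = in sync, '_' = missing or not in sync).
 */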
1448 static void status(struct seq_file *seq, struct mddev *mddev)
1449 {
1450         struct r1conf *conf = mddev->private;
1451         int i;
1452
1453         seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1454                    conf->raid_disks - mddev->degraded);
1455         rcu_read_lock();
1456         for (i = 0; i < conf->raid_disks; i++) {
1457                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1458                 seq_printf(seq, "%s",
1459                            rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1460         }
1461         rcu_read_unlock();
1462         seq_printf(seq, "]");
1463 }
1464
1465
1466 static void error(struct mddev *mddev, struct md_rdev *rdev)
1467 {
1468         char b[BDEVNAME_SIZE];
1469         struct r1conf *conf = mddev->private;
1470
1471         /*
1472          * If it is not operational, then we have already marked it as dead;
1473          * else if it is the last working disk, ignore the error and let the
1474          * next level up know;
1475          * else mark the drive as failed.
1476          */
1477         if (test_bit(In_sync, &rdev->flags)
1478             && (conf->raid_disks - mddev->degraded) == 1) {
1479                 /*
1480                  * Don't fail the drive, act as though we were just a
1481                  * normal single drive.
1482                  * However don't try a recovery from this drive as
1483                  * it is very likely to fail.
1484                  */
1485                 conf->recovery_disabled = mddev->recovery_disabled;
1486                 return;
1487         }
1488         set_bit(Blocked, &rdev->flags);
1489         if (test_and_clear_bit(In_sync, &rdev->flags)) {
1490                 unsigned long flags;
1491                 spin_lock_irqsave(&conf->device_lock, flags);
1492                 mddev->degraded++;
1493                 set_bit(Faulty, &rdev->flags);
1494                 spin_unlock_irqrestore(&conf->device_lock, flags);
1495                 /*
1496                  * if recovery is running, make sure it aborts.
1497                  */
1498                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1499         } else
1500                 set_bit(Faulty, &rdev->flags);
1501         set_bit(MD_CHANGE_DEVS, &mddev->flags);
1502         printk(KERN_ALERT
1503                "md/raid1:%s: Disk failure on %s, disabling device.\n"
1504                "md/raid1:%s: Operation continuing on %d devices.\n",
1505                mdname(mddev), bdevname(rdev->bdev, b),
1506                mdname(mddev), conf->raid_disks - mddev->degraded);
1507 }
1508
1509 static void print_conf(struct r1conf *conf)
1510 {
1511         int i;
1512
1513         printk(KERN_DEBUG "RAID1 conf printout:\n");
1514         if (!conf) {
1515                 printk(KERN_DEBUG "(!conf)\n");
1516                 return;
1517         }
1518         printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1519                 conf->raid_disks);
1520
1521         rcu_read_lock();
1522         for (i = 0; i < conf->raid_disks; i++) {
1523                 char b[BDEVNAME_SIZE];
1524                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1525                 if (rdev)
1526                         printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1527                                i, !test_bit(In_sync, &rdev->flags),
1528                                !test_bit(Faulty, &rdev->flags),
1529                                bdevname(rdev->bdev,b));
1530         }
1531         rcu_read_unlock();
1532 }
1533
1534 static void close_sync(struct r1conf *conf)
1535 {
1536         wait_barrier(conf, NULL);
1537         allow_barrier(conf, 0, 0);
1538
1539         mempool_destroy(conf->r1buf_pool);
1540         conf->r1buf_pool = NULL;
1541
1542         conf->next_resync = 0;
1543         conf->start_next_window = MaxSector;
1544 }
1545
1546 static int raid1_spare_active(struct mddev *mddev)
1547 {
1548         int i;
1549         struct r1conf *conf = mddev->private;
1550         int count = 0;
1551         unsigned long flags;
1552
1553         /*
1554          * Find all failed disks within the RAID1 configuration
1555          * and mark them readable.
1556          * Called under mddev lock, so rcu protection not needed.
1557          */
1558         for (i = 0; i < conf->raid_disks; i++) {
1559                 struct md_rdev *rdev = conf->mirrors[i].rdev;
1560                 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1561                 if (repl
1562                     && repl->recovery_offset == MaxSector
1563                     && !test_bit(Faulty, &repl->flags)
1564                     && !test_and_set_bit(In_sync, &repl->flags)) {
1565                         /* replacement has just become active */
1566                         if (!rdev ||
1567                             !test_and_clear_bit(In_sync, &rdev->flags))
1568                                 count++;
1569                         if (rdev) {
1570                                 /* Replaced device not technically
1571                                  * faulty, but we need to be sure
1572                                  * it gets removed and never re-added
1573                                  */
1574                                 set_bit(Faulty, &rdev->flags);
1575                                 sysfs_notify_dirent_safe(
1576                                         rdev->sysfs_state);
1577                         }
1578                 }
1579                 if (rdev
1580                     && rdev->recovery_offset == MaxSector
1581                     && !test_bit(Faulty, &rdev->flags)
1582                     && !test_and_set_bit(In_sync, &rdev->flags)) {
1583                         count++;
1584                         sysfs_notify_dirent_safe(rdev->sysfs_state);
1585                 }
1586         }
1587         spin_lock_irqsave(&conf->device_lock, flags);
1588         mddev->degraded -= count;
1589         spin_unlock_irqrestore(&conf->device_lock, flags);
1590
1591         print_conf(conf);
1592         return count;
1593 }
1594
1595
1596 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1597 {
1598         struct r1conf *conf = mddev->private;
1599         int err = -EEXIST;
1600         int mirror = 0;
1601         struct raid1_info *p;
1602         int first = 0;
1603         int last = conf->raid_disks - 1;
1604         struct request_queue *q = bdev_get_queue(rdev->bdev);
1605
1606         if (mddev->recovery_disabled == conf->recovery_disabled)
1607                 return -EBUSY;
1608
1609         if (rdev->raid_disk >= 0)
1610                 first = last = rdev->raid_disk;
1611
1612         if (q->merge_bvec_fn) {
1613                 set_bit(Unmerged, &rdev->flags);
1614                 mddev->merge_check_needed = 1;
1615         }
1616
1617         for (mirror = first; mirror <= last; mirror++) {
1618                 p = conf->mirrors+mirror;
1619                 if (!p->rdev) {
1620
1621                         if (mddev->gendisk)
1622                                 disk_stack_limits(mddev->gendisk, rdev->bdev,
1623                                                   rdev->data_offset << 9);
1624
1625                         p->head_position = 0;
1626                         rdev->raid_disk = mirror;
1627                         err = 0;
1628                         /* As all devices are equivalent, we don't need a full recovery
1629                          * if this device was recently part of the array.
1630                          */
1631                         if (rdev->saved_raid_disk < 0)
1632                                 conf->fullsync = 1;
1633                         rcu_assign_pointer(p->rdev, rdev);
1634                         break;
1635                 }
1636                 if (test_bit(WantReplacement, &p->rdev->flags) &&
1637                     p[conf->raid_disks].rdev == NULL) {
1638                         /* Add this device as a replacement */
1639                         clear_bit(In_sync, &rdev->flags);
1640                         set_bit(Replacement, &rdev->flags);
1641                         rdev->raid_disk = mirror;
1642                         err = 0;
1643                         conf->fullsync = 1;
1644                         rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1645                         break;
1646                 }
1647         }
1648         if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1649                 /* Some requests might not have seen this new
1650                  * merge_bvec_fn.  We must wait for them to complete
1651                  * before merging the device fully.
1652                  * First we make sure any code which has tested
1653                  * our function has submitted the request, then
1654                  * we wait for all outstanding requests to complete.
1655                  */
1656                 synchronize_sched();
1657                 freeze_array(conf, 0);
1658                 unfreeze_array(conf);
1659                 clear_bit(Unmerged, &rdev->flags);
1660         }
1661         md_integrity_add_rdev(rdev, mddev);
1662         if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1663                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1664         print_conf(conf);
1665         return err;
1666 }
1667
1668 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1669 {
1670         struct r1conf *conf = mddev->private;
1671         int err = 0;
1672         int number = rdev->raid_disk;
1673         struct raid1_info *p = conf->mirrors + number;
1674
1675         if (rdev != p->rdev)
1676                 p = conf->mirrors + conf->raid_disks + number;
1677
1678         print_conf(conf);
1679         if (rdev == p->rdev) {
1680                 if (test_bit(In_sync, &rdev->flags) ||
1681                     atomic_read(&rdev->nr_pending)) {
1682                         err = -EBUSY;
1683                         goto abort;
1684                 }
1685                 /* Only remove non-faulty devices if recovery
1686                  * is not possible.
1687                  */
1688                 if (!test_bit(Faulty, &rdev->flags) &&
1689                     mddev->recovery_disabled != conf->recovery_disabled &&
1690                     mddev->degraded < conf->raid_disks) {
1691                         err = -EBUSY;
1692                         goto abort;
1693                 }
1694                 p->rdev = NULL;
1695                 synchronize_rcu();
1696                 if (atomic_read(&rdev->nr_pending)) {
1697                         /* lost the race, try later */
1698                         err = -EBUSY;
1699                         p->rdev = rdev;
1700                         goto abort;
1701                 } else if (conf->mirrors[conf->raid_disks + number].rdev) {
1702                         /* We just removed a device that is being replaced.
1703                          * Move down the replacement.  We drain all IO before
1704                          * doing this to avoid confusion.
1705                          */
1706                         struct md_rdev *repl =
1707                                 conf->mirrors[conf->raid_disks + number].rdev;
1708                         freeze_array(conf, 0);
1709                         clear_bit(Replacement, &repl->flags);
1710                         p->rdev = repl;
1711                         conf->mirrors[conf->raid_disks + number].rdev = NULL;
1712                         unfreeze_array(conf);
1713                         clear_bit(WantReplacement, &rdev->flags);
1714                 } else
1715                         clear_bit(WantReplacement, &rdev->flags);
1716                 err = md_integrity_register(mddev);
1717         }
1718 abort:
1719
1720         print_conf(conf);
1721         return err;
1722 }
1723
1724
1725 static void end_sync_read(struct bio *bio, int error)
1726 {
1727         struct r1bio *r1_bio = bio->bi_private;
1728
1729         update_head_pos(r1_bio->read_disk, r1_bio);
1730
1731         /*
1732          * we have read a block, now it needs to be re-written,
1733          * or re-read if the read failed.
1734          * We don't do much here, just schedule handling by raid1d
1735          */
1736         if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1737                 set_bit(R1BIO_Uptodate, &r1_bio->state);
1738
1739         if (atomic_dec_and_test(&r1_bio->remaining))
1740                 reschedule_retry(r1_bio);
1741 }
1742
1743 static void end_sync_write(struct bio *bio, int error)
1744 {
1745         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1746         struct r1bio *r1_bio = bio->bi_private;
1747         struct mddev *mddev = r1_bio->mddev;
1748         struct r1conf *conf = mddev->private;
1749         int mirror = 0;
1750         sector_t first_bad;
1751         int bad_sectors;
1752
1753         mirror = find_bio_disk(r1_bio, bio);
1754
1755         if (!uptodate) {
1756                 sector_t sync_blocks = 0;
1757                 sector_t s = r1_bio->sector;
1758                 long sectors_to_go = r1_bio->sectors;
1759                 /* make sure these bits don't get cleared. */
1760                 do {
1761                         bitmap_end_sync(mddev->bitmap, s,
1762                                         &sync_blocks, 1);
1763                         s += sync_blocks;
1764                         sectors_to_go -= sync_blocks;
1765                 } while (sectors_to_go > 0);
1766                 set_bit(WriteErrorSeen,
1767                         &conf->mirrors[mirror].rdev->flags);
1768                 if (!test_and_set_bit(WantReplacement,
1769                                       &conf->mirrors[mirror].rdev->flags))
1770                         set_bit(MD_RECOVERY_NEEDED,
1771                                 &mddev->recovery);
1772                 set_bit(R1BIO_WriteError, &r1_bio->state);
1773         } else if (is_badblock(conf->mirrors[mirror].rdev,
1774                                r1_bio->sector,
1775                                r1_bio->sectors,
1776                                &first_bad, &bad_sectors) &&
1777                    !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1778                                 r1_bio->sector,
1779                                 r1_bio->sectors,
1780                                 &first_bad, &bad_sectors)
1781                 )
1782                 set_bit(R1BIO_MadeGood, &r1_bio->state);
1783
1784         if (atomic_dec_and_test(&r1_bio->remaining)) {
1785                 int s = r1_bio->sectors;
1786                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1787                     test_bit(R1BIO_WriteError, &r1_bio->state))
1788                         reschedule_retry(r1_bio);
1789                 else {
1790                         put_buf(r1_bio);
1791                         md_done_sync(mddev, s, uptodate);
1792                 }
1793         }
1794 }
1795
1796 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1797                             int sectors, struct page *page, int rw)
1798 {
1799         if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1800                 /* success */
1801                 return 1;
1802         if (rw == WRITE) {
1803                 set_bit(WriteErrorSeen, &rdev->flags);
1804                 if (!test_and_set_bit(WantReplacement,
1805                                       &rdev->flags))
1806                         set_bit(MD_RECOVERY_NEEDED,
1807                                 &rdev->mddev->recovery);
1808         }
1809         /* need to record an error - either for the block or the device */
1810         if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1811                 md_error(rdev->mddev, rdev);
1812         return 0;
1813 }
1814
1815 static int fix_sync_read_error(struct r1bio *r1_bio)
1816 {
1817         /* Try some synchronous reads of other devices to get
1818          * good data, much like with normal read errors.  Only
1819          * read into the pages we already have so we don't
1820          * need to re-issue the read request.
1821          * We don't need to freeze the array, because while a sync
1822          * request is active there is no normal IO and no
1823          * overlapping syncs.
1824          * We don't need to check is_badblock() again as we
1825          * made sure that anything with a bad block in range
1826          * will have bi_end_io clear.
1827          */
1828         struct mddev *mddev = r1_bio->mddev;
1829         struct r1conf *conf = mddev->private;
1830         struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1831         sector_t sect = r1_bio->sector;
1832         int sectors = r1_bio->sectors;
1833         int idx = 0;
1834
1835         while (sectors) {
1836                 int s = sectors;
1837                 int d = r1_bio->read_disk;
1838                 int success = 0;
1839                 struct md_rdev *rdev;
1840                 int start;
1841
1842                 if (s > (PAGE_SIZE>>9))
1843                         s = PAGE_SIZE >> 9;
1844                 do {
1845                         if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1846                                 /* No rcu protection needed here: devices
1847                                  * can only be removed when no resync is
1848                                  * active, and a resync is currently active.
1849                                  */
1850                                 rdev = conf->mirrors[d].rdev;
1851                                 if (sync_page_io(rdev, sect, s<<9,
1852                                                  bio->bi_io_vec[idx].bv_page,
1853                                                  READ, false)) {
1854                                         success = 1;
1855                                         break;
1856                                 }
1857                         }
1858                         d++;
1859                         if (d == conf->raid_disks * 2)
1860                                 d = 0;
1861                 } while (!success && d != r1_bio->read_disk);
1862
1863                 if (!success) {
1864                         char b[BDEVNAME_SIZE];
1865                         int abort = 0;
1866                         /* Cannot read from anywhere, this block is lost.
1867                          * Record a bad block on each device.  If that doesn't
1868                          * work just disable and interrupt the recovery.
1869                          * Don't fail devices as that won't really help.
1870                          */
1871                         printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
1872                                " for block %llu\n",
1873                                mdname(mddev),
1874                                bdevname(bio->bi_bdev, b),
1875                                (unsigned long long)r1_bio->sector);
1876                         for (d = 0; d < conf->raid_disks * 2; d++) {
1877                                 rdev = conf->mirrors[d].rdev;
1878                                 if (!rdev || test_bit(Faulty, &rdev->flags))
1879                                         continue;
1880                                 if (!rdev_set_badblocks(rdev, sect, s, 0))
1881                                         abort = 1;
1882                         }
1883                         if (abort) {
1884                                 conf->recovery_disabled =
1885                                         mddev->recovery_disabled;
1886                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1887                                 md_done_sync(mddev, r1_bio->sectors, 0);
1888                                 put_buf(r1_bio);
1889                                 return 0;
1890                         }
1891                         /* Try next page */
1892                         sectors -= s;
1893                         sect += s;
1894                         idx++;
1895                         continue;
1896                 }
1897
1898                 start = d;
1899                 /* write it back and re-read */
1900                 while (d != r1_bio->read_disk) {
1901                         if (d == 0)
1902                                 d = conf->raid_disks * 2;
1903                         d--;
1904                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1905                                 continue;
1906                         rdev = conf->mirrors[d].rdev;
1907                         if (r1_sync_page_io(rdev, sect, s,
1908                                             bio->bi_io_vec[idx].bv_page,
1909                                             WRITE) == 0) {
1910                                 r1_bio->bios[d]->bi_end_io = NULL;
1911                                 rdev_dec_pending(rdev, mddev);
1912                         }
1913                 }
1914                 d = start;
1915                 while (d != r1_bio->read_disk) {
1916                         if (d == 0)
1917                                 d = conf->raid_disks * 2;
1918                         d--;
1919                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1920                                 continue;
1921                         rdev = conf->mirrors[d].rdev;
1922                         if (r1_sync_page_io(rdev, sect, s,
1923                                             bio->bi_io_vec[idx].bv_page,
1924                                             READ) != 0)
1925                                 atomic_add(s, &rdev->corrected_errors);
1926                 }
1927                 sectors -= s;
1928                 sect += s;
1929                 idx++;
1930         }
1931         set_bit(R1BIO_Uptodate, &r1_bio->state);
1932         set_bit(BIO_UPTODATE, &bio->bi_flags);
1933         return 1;
1934 }
1935
1936 static int process_checks(struct r1bio *r1_bio)
1937 {
1938         /* We have read all readable devices.  If we haven't
1939          * got the block, then there is no hope left.
1940          * If we have, then we want to do a comparison
1941          * and skip the write if everything is the same.
1942          * If any blocks failed to read, then we need to
1943          * attempt an over-write.
1944          */
1945         struct mddev *mddev = r1_bio->mddev;
1946         struct r1conf *conf = mddev->private;
1947         int primary;
1948         int i;
1949         int vcnt;
1950
1951         /* Fix variable parts of all bios */
1952         vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
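             /* e.g. with 4K pages, PAGE_SIZE/512 == 8 and PAGE_SHIFT-9 == 3,
              * so this is DIV_ROUND_UP(sectors, 8): one page-sized bvec
              * for every 8 sectors */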
1953         for (i = 0; i < conf->raid_disks * 2; i++) {
1954                 int j;
1955                 int size;
1956                 struct bio *b = r1_bio->bios[i];
1957                 if (b->bi_end_io != end_sync_read)
1958                         continue;
1959                 /* fixup the bio for reuse */
1960                 bio_reset(b);
1961                 b->bi_vcnt = vcnt;
1962                 b->bi_size = r1_bio->sectors << 9;
1963                 b->bi_sector = r1_bio->sector +
1964                         conf->mirrors[i].rdev->data_offset;
1965                 b->bi_bdev = conf->mirrors[i].rdev->bdev;
1966                 b->bi_end_io = end_sync_read;
1967                 b->bi_private = r1_bio;
1968
1969                 size = b->bi_size;
1970                 for (j = 0; j < vcnt ; j++) {
1971                         struct bio_vec *bi;
1972                         bi = &b->bi_io_vec[j];
1973                         bi->bv_offset = 0;
1974                         if (size > PAGE_SIZE)
1975                                 bi->bv_len = PAGE_SIZE;
1976                         else
1977                                 bi->bv_len = size;
1978                         size -= PAGE_SIZE;
1979                 }
1980         }
1981         for (primary = 0; primary < conf->raid_disks * 2; primary++)
1982                 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1983                     test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1984                         r1_bio->bios[primary]->bi_end_io = NULL;
1985                         rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1986                         break;
1987                 }
1988         r1_bio->read_disk = primary;
1989         for (i = 0; i < conf->raid_disks * 2; i++) {
1990                 int j;
1991                 struct bio *pbio = r1_bio->bios[primary];
1992                 struct bio *sbio = r1_bio->bios[i];
1993
1994                 if (sbio->bi_end_io != end_sync_read)
1995                         continue;
1996
1997                 if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
1998                         for (j = vcnt; j-- ; ) {
1999                                 struct page *p, *s;
2000                                 p = pbio->bi_io_vec[j].bv_page;
2001                                 s = sbio->bi_io_vec[j].bv_page;
2002                                 if (memcmp(page_address(p),
2003                                            page_address(s),
2004                                            sbio->bi_io_vec[j].bv_len))
2005                                         break;
2006                         }
2007                 } else
2008                         j = 0;
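                     /* j >= 0 now means either a miscompare at page j or a
                      * failed read (j forced to 0); both count as a mismatch.
                      */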
2009                 if (j >= 0)
2010                         atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2011                 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2012                               && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
2013                         /* No need to write to this device. */
2014                         sbio->bi_end_io = NULL;
2015                         rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2016                         continue;
2017                 }
2018
2019                 bio_copy_data(sbio, pbio);
2020         }
2021         return 0;
2022 }
2023
2024 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2025 {
2026         struct r1conf *conf = mddev->private;
2027         int i;
2028         int disks = conf->raid_disks * 2;
2029         struct bio *bio, *wbio;
2030
2031         bio = r1_bio->bios[r1_bio->read_disk];
2032
2033         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2034                 /* ouch - failed to read all of that. */
2035                 if (!fix_sync_read_error(r1_bio))
2036                         return;
2037
2038         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2039                 if (process_checks(r1_bio) < 0)
2040                         return;
2041         /*
2042          * schedule writes
2043          */
2044         atomic_set(&r1_bio->remaining, 1);
2045         for (i = 0; i < disks ; i++) {
2046                 wbio = r1_bio->bios[i];
2047                 if (wbio->bi_end_io == NULL ||
2048                     (wbio->bi_end_io == end_sync_read &&
2049                      (i == r1_bio->read_disk ||
2050                       !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2051                         continue;
2052
2053                 wbio->bi_rw = WRITE;
2054                 wbio->bi_end_io = end_sync_write;
2055                 atomic_inc(&r1_bio->remaining);
2056                 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2057
2058                 generic_make_request(wbio);
2059         }
2060
2061         if (atomic_dec_and_test(&r1_bio->remaining)) {
2062                 /* if we're here, all write(s) have completed, so clean up */
2063                 int s = r1_bio->sectors;
2064                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2065                     test_bit(R1BIO_WriteError, &r1_bio->state))
2066                         reschedule_retry(r1_bio);
2067                 else {
2068                         put_buf(r1_bio);
2069                         md_done_sync(mddev, s, 1);
2070                 }
2071         }
2072 }
2073
2074 /*
2075  * This is a kernel thread which:
2076  *
2077  *      1.      Retries failed read operations on working mirrors.
2078  *      2.      Updates the raid superblock when problems are encountered.
2079  *      3.      Performs writes following reads for array synchronisation.
2080  */
2081
2082 static void fix_read_error(struct r1conf *conf, int read_disk,
2083                            sector_t sect, int sectors)
2084 {
2085         struct mddev *mddev = conf->mddev;
2086         while (sectors) {
2087                 int s = sectors;
2088                 int d = read_disk;
2089                 int success = 0;
2090                 int start;
2091                 struct md_rdev *rdev;
2092
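                     /* work in chunks of at most one page: conf->tmppage,
                      * used below as the bounce buffer, is a single page */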
2093                 if (s > (PAGE_SIZE>>9))
2094                         s = PAGE_SIZE >> 9;
2095
2096                 do {
2097                         /* Note: no rcu protection needed here
2098                          * as this is synchronous in the raid1d thread
2099                          * which is the thread that might remove
2100                          * a device.  If raid1d ever becomes multi-threaded....
2101                          */
2102                         sector_t first_bad;
2103                         int bad_sectors;
2104
2105                         rdev = conf->mirrors[d].rdev;
2106                         if (rdev &&
2107                             (test_bit(In_sync, &rdev->flags) ||
2108                              (!test_bit(Faulty, &rdev->flags) &&
2109                               rdev->recovery_offset >= sect + s)) &&
2110                             is_badblock(rdev, sect, s,
2111                                         &first_bad, &bad_sectors) == 0 &&
2112                             sync_page_io(rdev, sect, s<<9,
2113                                          conf->tmppage, READ, false))
2114                                 success = 1;
2115                         else {
2116                                 d++;
2117                                 if (d == conf->raid_disks * 2)
2118                                         d = 0;
2119                         }
2120                 } while (!success && d != read_disk);
2121
2122                 if (!success) {
2123                         /* Cannot read from anywhere - mark it bad */
2124                         struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2125                         if (!rdev_set_badblocks(rdev, sect, s, 0))
2126                                 md_error(mddev, rdev);
2127                         break;
2128                 }
2129                 /* write it back and re-read */
2130                 start = d;
2131                 while (d != read_disk) {
2132                         if (d == 0)
2133                                 d = conf->raid_disks * 2;
2134                         d--;
2135                         rdev = conf->mirrors[d].rdev;
2136                         if (rdev &&
2137                             test_bit(In_sync, &rdev->flags))
2138                                 r1_sync_page_io(rdev, sect, s,
2139                                                 conf->tmppage, WRITE);
2140                 }
2141                 d = start;
2142                 while (d != read_disk) {
2143                         char b[BDEVNAME_SIZE];
2144                         if (d == 0)
2145                                 d = conf->raid_disks * 2;
2146                         d--;
2147                         rdev = conf->mirrors[d].rdev;
2148                         if (rdev &&
2149                             test_bit(In_sync, &rdev->flags)) {
2150                                 if (r1_sync_page_io(rdev, sect, s,
2151                                                     conf->tmppage, READ)) {
2152                                         atomic_add(s, &rdev->corrected_errors);
2153                                         printk(KERN_INFO
2154                                                "md/raid1:%s: read error corrected "
2155                                                "(%d sectors at %llu on %s)\n",
2156                                                mdname(mddev), s,
2157                                                (unsigned long long)(sect +
2158                                                    rdev->data_offset),
2159                                                bdevname(rdev->bdev, b));
2160                                 }
2161                         }
2162                 }
2163                 sectors -= s;
2164                 sect += s;
2165         }
2166 }
2167
2168 static int narrow_write_error(struct r1bio *r1_bio, int i)
2169 {
2170         struct mddev *mddev = r1_bio->mddev;
2171         struct r1conf *conf = mddev->private;
2172         struct md_rdev *rdev = conf->mirrors[i].rdev;
2173
2174         /* bio has the data to be written to device 'i' where
2175          * we just recently had a write error.
2176          * We repeatedly clone the bio and trim down to one block,
2177          * then try the write.  Where the write fails we record
2178          * a bad block.
2179          * It is conceivable that the bio doesn't exactly align with
2180          * blocks.  We must handle this somehow.
2181          *
2182          * We currently own a reference on the rdev.
2183          */
2184
2185         int block_sectors;
2186         sector_t sector;
2187         int sectors;
2188         int sect_to_write = r1_bio->sectors;
2189         int ok = 1;
2190
2191         if (rdev->badblocks.shift < 0)
2192                 return 0;
2193
2194         block_sectors = 1 << rdev->badblocks.shift;
2195         sector = r1_bio->sector;
2196         sectors = ((sector + block_sectors)
2197                    & ~(sector_t)(block_sectors - 1))
2198                 - sector;
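             /*
              * 'sectors' is the distance to the next block boundary, e.g.
              * with block_sectors == 8 and sector == 1001 this gives
              * ((1001 + 8) & ~7) - 1001 == 7; a full block if 'sector' is
              * already aligned.
              */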
2199
2200         while (sect_to_write) {
2201                 struct bio *wbio;
2202                 if (sectors > sect_to_write)
2203                         sectors = sect_to_write;
2204                 /* Write at 'sector' for 'sectors' */
2205
2206                 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2207                         unsigned vcnt = r1_bio->behind_page_count;
2208                         struct bio_vec *vec = r1_bio->behind_bvecs;
2209
2210                         while (!vec->bv_page) {
2211                                 vec++;
2212                                 vcnt--;
2213                         }
2214
2215                         wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2216                         memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2217
2218                         wbio->bi_vcnt = vcnt;
2219                 } else {
2220                         wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2221                 }
2222
2223                 wbio->bi_rw = WRITE;
2224                 wbio->bi_sector = r1_bio->sector;
2225                 wbio->bi_size = r1_bio->sectors << 9;
2226
2227                 bio_trim(wbio, sector - r1_bio->sector, sectors);
2228                 wbio->bi_sector += rdev->data_offset;
2229                 wbio->bi_bdev = rdev->bdev;
2230                 if (submit_bio_wait(WRITE, wbio) == 0)
2231                         /* failure! */
2232                         ok = rdev_set_badblocks(rdev, sector,
2233                                                 sectors, 0)
2234                                 && ok;
2235
2236                 bio_put(wbio);
2237                 sect_to_write -= sectors;
2238                 sector += sectors;
2239                 sectors = block_sectors;
2240         }
2241         return ok;
2242 }
2243
2244 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2245 {
2246         int m;
2247         int s = r1_bio->sectors;
2248         for (m = 0; m < conf->raid_disks * 2 ; m++) {
2249                 struct md_rdev *rdev = conf->mirrors[m].rdev;
2250                 struct bio *bio = r1_bio->bios[m];
2251                 if (bio->bi_end_io == NULL)
2252                         continue;
2253                 if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
2254                     test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2255                         rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2256                 }
2257                 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
2258                     test_bit(R1BIO_WriteError, &r1_bio->state)) {
2259                         if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2260                                 md_error(conf->mddev, rdev);
2261                 }
2262         }
2263         put_buf(r1_bio);
2264         md_done_sync(conf->mddev, s, 1);
2265 }
2266
2267 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2268 {
2269         int m;
2270         for (m = 0; m < conf->raid_disks * 2 ; m++)
2271                 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2272                         struct md_rdev *rdev = conf->mirrors[m].rdev;
2273                         rdev_clear_badblocks(rdev,
2274                                              r1_bio->sector,
2275                                              r1_bio->sectors, 0);
2276                         rdev_dec_pending(rdev, conf->mddev);
2277                 } else if (r1_bio->bios[m] != NULL) {
2278                         /* This drive got a write error.  We need to
2279                          * narrow down and record precise write
2280                          * errors.
2281                          */
2282                         if (!narrow_write_error(r1_bio, m)) {
2283                                 md_error(conf->mddev,
2284                                          conf->mirrors[m].rdev);
2285                                 /* an I/O failed, we can't clear the bitmap */
2286                                 set_bit(R1BIO_Degraded, &r1_bio->state);
2287                         }
2288                         rdev_dec_pending(conf->mirrors[m].rdev,
2289                                          conf->mddev);
2290                 }
2291         if (test_bit(R1BIO_WriteError, &r1_bio->state))
2292                 close_write(r1_bio);
2293         raid_end_bio_io(r1_bio);
2294 }
2295
2296 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2297 {
2298         int disk;
2299         int max_sectors;
2300         struct mddev *mddev = conf->mddev;
2301         struct bio *bio;
2302         char b[BDEVNAME_SIZE];
2303         struct md_rdev *rdev;
2304
2305         clear_bit(R1BIO_ReadError, &r1_bio->state);
2306         /* We got a read error.  Maybe the drive is bad, maybe just
2307          * the block, in which case we can fix it.
2308          * We freeze all other IO, and try reading the block from
2309          * other devices.  When we find one, we re-write
2310          * and check whether that fixes the read error.
2311          * This is all done synchronously while the array is
2312          * frozen.
2313          */
2314         if (mddev->ro == 0) {
2315                 freeze_array(conf, 1);
2316                 fix_read_error(conf, r1_bio->read_disk,
2317                                r1_bio->sector, r1_bio->sectors);
2318                 unfreeze_array(conf);
2319         } else
2320                 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
2321         rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
2322
2323         bio = r1_bio->bios[r1_bio->read_disk];
2324         bdevname(bio->bi_bdev, b);
2325 read_more:
2326         disk = read_balance(conf, r1_bio, &max_sectors);
2327         if (disk == -1) {
2328                 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
2329                        " read error for block %llu\n",
2330                        mdname(mddev), b, (unsigned long long)r1_bio->sector);
2331                 raid_end_bio_io(r1_bio);
2332         } else {
2333                 const unsigned long do_sync
2334                         = r1_bio->master_bio->bi_rw & REQ_SYNC;
2335                 if (bio) {
2336                         r1_bio->bios[r1_bio->read_disk] =
2337                                 mddev->ro ? IO_BLOCKED : NULL;
2338                         bio_put(bio);
2339                 }
2340                 r1_bio->read_disk = disk;
2341                 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2342                 bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
2343                 r1_bio->bios[r1_bio->read_disk] = bio;
2344                 rdev = conf->mirrors[disk].rdev;
2345                 printk_ratelimited(KERN_ERR
2346                                    "md/raid1:%s: redirecting sector %llu"
2347                                    " to other mirror: %s\n",
2348                                    mdname(mddev),
2349                                    (unsigned long long)r1_bio->sector,
2350                                    bdevname(rdev->bdev, b));
2351                 bio->bi_sector = r1_bio->sector + rdev->data_offset;
2352                 bio->bi_bdev = rdev->bdev;
2353                 bio->bi_end_io = raid1_end_read_request;
2354                 bio->bi_rw = READ | do_sync;
2355                 bio->bi_private = r1_bio;
2356                 if (max_sectors < r1_bio->sectors) {
2357                         /* Drat - have to split this up more */
2358                         struct bio *mbio = r1_bio->master_bio;
2359                         int sectors_handled = (r1_bio->sector + max_sectors
2360                                                - mbio->bi_sector);
2361                         r1_bio->sectors = max_sectors;
2362                         spin_lock_irq(&conf->device_lock);
2363                         if (mbio->bi_phys_segments == 0)
2364                                 mbio->bi_phys_segments = 2;
2365                         else
2366                                 mbio->bi_phys_segments++;
2367                         spin_unlock_irq(&conf->device_lock);
2368                         generic_make_request(bio);
2369                         bio = NULL;
2370
2371                         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2372
2373                         r1_bio->master_bio = mbio;
2374                         r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
2375                         r1_bio->state = 0;
2376                         set_bit(R1BIO_ReadError, &r1_bio->state);
2377                         r1_bio->mddev = mddev;
2378                         r1_bio->sector = mbio->bi_sector + sectors_handled;
2379
2380                         goto read_more;
2381                 } else
2382                         generic_make_request(bio);
2383         }
2384 }
2385
2386 static void raid1d(struct md_thread *thread)
2387 {
2388         struct mddev *mddev = thread->mddev;
2389         struct r1bio *r1_bio;
2390         unsigned long flags;
2391         struct r1conf *conf = mddev->private;
2392         struct list_head *head = &conf->retry_list;
2393         struct blk_plug plug;
2394
2395         md_check_recovery(mddev);
2396
2397         blk_start_plug(&plug);
2398         for (;;) {
2399
2400                 flush_pending_writes(conf);
2401
2402                 spin_lock_irqsave(&conf->device_lock, flags);
2403                 if (list_empty(head)) {
2404                         spin_unlock_irqrestore(&conf->device_lock, flags);
2405                         break;
2406                 }
2407                 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2408                 list_del(head->prev);
2409                 conf->nr_queued--;
2410                 spin_unlock_irqrestore(&conf->device_lock, flags);
2411
2412                 mddev = r1_bio->mddev;
2413                 conf = mddev->private;
2414                 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2415                         if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2416                             test_bit(R1BIO_WriteError, &r1_bio->state))
2417                                 handle_sync_write_finished(conf, r1_bio);
2418                         else
2419                                 sync_request_write(mddev, r1_bio);
2420                 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2421                            test_bit(R1BIO_WriteError, &r1_bio->state))
2422                         handle_write_finished(conf, r1_bio);
2423                 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2424                         handle_read_error(conf, r1_bio);
2425                 else
2426                         /* just a partial read to be scheduled from a
2427                          * separate context
2428                          */
2429                         generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2430
2431                 cond_resched();
2432                 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2433                         md_check_recovery(mddev);
2434         }
2435         blk_finish_plug(&plug);
2436 }
2437
2438
2439 static int init_resync(struct r1conf *conf)
2440 {
2441         int buffs;
2442
2443         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
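             /* RESYNC_WINDOW / RESYNC_BLOCK_SIZE == RESYNC_DEPTH, i.e. 32
              * buffers with the current macro values */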
2444         BUG_ON(conf->r1buf_pool);
2445         conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2446                                           conf->poolinfo);
2447         if (!conf->r1buf_pool)
2448                 return -ENOMEM;
2449         conf->next_resync = 0;
2450         return 0;
2451 }
2452
2453 /*
2454  * perform a "sync" on one "block"
2455  *
2456  * We need to make sure that no normal I/O request - particularly write
2457  * requests - conflict with active sync requests.
2458  *
2459  * This is achieved by tracking pending requests and a 'barrier' concept
2460  * that can be installed to exclude normal IO requests.
2461  */
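/*
 * raise_barrier() below installs such a barrier around each resync window;
 * normal IO goes through wait_barrier() and is held off while the barrier
 * is up.
 */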
2462
2463 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
2464 {
2465         struct r1conf *conf = mddev->private;
2466         struct r1bio *r1_bio;
2467         struct bio *bio;
2468         sector_t max_sector, nr_sectors;
2469         int disk = -1;
2470         int i;
2471         int wonly = -1;
2472         int write_targets = 0, read_targets = 0;
2473         sector_t sync_blocks;
2474         int still_degraded = 0;
2475         int good_sectors = RESYNC_SECTORS;
2476         int min_bad = 0; /* number of sectors that are bad in all devices */
2477
2478         if (!conf->r1buf_pool)
2479                 if (init_resync(conf))
2480                         return 0;
2481
2482         max_sector = mddev->dev_sectors;
2483         if (sector_nr >= max_sector) {
2484                 /* If we aborted, we need to abort the
2485                  * sync on the 'current' bitmap chunk (there will
2486                  * only be one in raid1 resync).
2487                  * We can find the current address in mddev->curr_resync.
2488                  */
2489                 if (mddev->curr_resync < max_sector) /* aborted */
2490                         bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2491                                                 &sync_blocks, 1);
2492                 else /* completed sync */
2493                         conf->fullsync = 0;
2494
2495                 bitmap_close_sync(mddev->bitmap);
2496                 close_sync(conf);
2497                 return 0;
2498         }
2499
2500         if (mddev->bitmap == NULL &&
2501             mddev->recovery_cp == MaxSector &&
2502             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2503             conf->fullsync == 0) {
2504                 *skipped = 1;
2505                 return max_sector - sector_nr;
2506         }
2507         /* Before building a request, check if we can skip these blocks.
2508          * This call to bitmap_start_sync doesn't actually record anything.
2509          */
2510         if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2511             !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2512                 /* We can skip this block, and probably several more */
2513                 *skipped = 1;
2514                 return sync_blocks;
2515         }
2516         /*
2517          * If there is non-resync activity waiting for a turn,
2518          * and resync is going fast enough,
2519          * then let it through before starting on this new sync request.
2520          */
2521         if (!go_faster && conf->nr_waiting)
2522                 msleep_interruptible(1000);
2523
2524         bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2525         r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2526         raise_barrier(conf);
2527
2528         conf->next_resync = sector_nr;
2529
2530         rcu_read_lock();
2531         /*
2532          * If we get a correctable read error during resync or recovery,
2533          * we might want to read from a different device.  So we
2534          * flag all drives that could conceivably be read from for READ,
2535          * and any others (which will be non-In_sync devices) for WRITE.
2536          * If a read fails, we try reading from something else for which READ
2537          * is OK.
2538          */
2539
2540         r1_bio->mddev = mddev;
2541         r1_bio->sector = sector_nr;
2542         r1_bio->state = 0;
2543         set_bit(R1BIO_IsSync, &r1_bio->state);
2544
2545         for (i = 0; i < conf->raid_disks * 2; i++) {
2546                 struct md_rdev *rdev;
2547                 bio = r1_bio->bios[i];
2548                 bio_reset(bio);
2549
2550                 rdev = rcu_dereference(conf->mirrors[i].rdev);
2551                 if (rdev == NULL ||
2552                     test_bit(Faulty, &rdev->flags)) {
2553                         if (i < conf->raid_disks)
2554                                 still_degraded = 1;
2555                 } else if (!test_bit(In_sync, &rdev->flags)) {
2556                         bio->bi_rw = WRITE;
2557                         bio->bi_end_io = end_sync_write;
2558                         write_targets++;
2559                 } else {
2560                         /* may need to read from here */
2561                         sector_t first_bad = MaxSector;
2562                         int bad_sectors;
2563
2564                         if (is_badblock(rdev, sector_nr, good_sectors,
2565                                         &first_bad, &bad_sectors)) {
2566                                 if (first_bad > sector_nr)
2567                                         good_sectors = first_bad - sector_nr;
2568                                 else {
2569                                         bad_sectors -= (sector_nr - first_bad);
2570                                         if (min_bad == 0 ||
2571                                             min_bad > bad_sectors)
2572                                                 min_bad = bad_sectors;
2573                                 }
2574                         }
2575                         if (sector_nr < first_bad) {
2576                                 if (test_bit(WriteMostly, &rdev->flags)) {
2577                                         if (wonly < 0)
2578                                                 wonly = i;
2579                                 } else {
2580                                         if (disk < 0)
2581                                                 disk = i;
2582                                 }
2583                                 bio->bi_rw = READ;
2584                                 bio->bi_end_io = end_sync_read;
2585                                 read_targets++;
2586                         } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2587                                 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2588                                 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2589                                 /*
2590                                  * The device is suitable for reading (InSync),
2591                                  * but has bad block(s) here. Let's try to correct them,
2592                                  * if we are doing resync or repair. Otherwise, leave
2593                                  * this device alone for this sync request.
2594                                  */
2595                                 bio->bi_rw = WRITE;
2596                                 bio->bi_end_io = end_sync_write;
2597                                 write_targets++;
2598                         }
2599                 }
2600                 if (bio->bi_end_io) {
2601                         atomic_inc(&rdev->nr_pending);
2602                         bio->bi_sector = sector_nr + rdev->data_offset;
2603                         bio->bi_bdev = rdev->bdev;
2604                         bio->bi_private = r1_bio;
2605                 }
2606         }
2607         rcu_read_unlock();
2608         if (disk < 0)
2609                 disk = wonly;
2610         r1_bio->read_disk = disk;
2611
2612         if (read_targets == 0 && min_bad > 0) {
2613                 /* These sectors are bad on all InSync devices, so we
2614                  * need to mark them bad on all write targets
2615                  */
2616                 int ok = 1;
2617                 for (i = 0; i < conf->raid_disks * 2; i++)
2618                         if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2619                                 struct md_rdev *rdev = conf->mirrors[i].rdev;
2620                                 ok = rdev_set_badblocks(rdev,
2621                                                         sector_nr,
2622                                                         min_bad, 0) && ok;
2623                         }
2624                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2625                 *skipped = 1;
2626                 put_buf(r1_bio);
2627
2628                 if (!ok) {
2629                         /* Cannot record the badblocks, so need to
2630                          * abort the resync.
2631                          * If there are multiple read targets, could we
2632                          * just fail the really bad ones?
2633                          */
2634                         conf->recovery_disabled = mddev->recovery_disabled;
2635                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2636                         return 0;
2637                 } else
2638                         return min_bad;
2639
2640         }
2641         if (min_bad > 0 && min_bad < good_sectors) {
2642                 /* only resync enough to reach the next bad->good
2643                  * transition */
2644                 good_sectors = min_bad;
2645         }
2646
2647         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2648                 /* extra read targets are also write targets */
2649                 write_targets += read_targets - 1;
2650
2651         if (write_targets == 0 || read_targets == 0) {
2652                 /* There is nowhere to write, so all non-sync
2653                  * drives must have failed - so we are finished.
2654                  */
2655                 sector_t rv;
2656                 if (min_bad > 0)
2657                         max_sector = sector_nr + min_bad;
2658                 rv = max_sector - sector_nr;
2659                 *skipped = 1;
2660                 put_buf(r1_bio);
2661                 return rv;
2662         }
2663
2664         if (max_sector > mddev->resync_max)
2665                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2666         if (max_sector > sector_nr + good_sectors)
2667                 max_sector = sector_nr + good_sectors;
2668         nr_sectors = 0;
2669         sync_blocks = 0;
2670         do {
2671                 struct page *page;
2672                 int len = PAGE_SIZE;
2673                 if (sector_nr + (len>>9) > max_sector)
2674                         len = (max_sector - sector_nr) << 9;
2675                 if (len == 0)
2676                         break;
2677                 if (sync_blocks == 0) {
2678                         if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2679                                                &sync_blocks, still_degraded) &&
2680                             !conf->fullsync &&
2681                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2682                                 break;
2683                         BUG_ON(sync_blocks < (PAGE_SIZE>>9));
2684                         if ((len >> 9) > sync_blocks)
2685                                 len = sync_blocks<<9;
2686                 }
2687
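                     /* Add this page to every bio being built; if any bio
                      * fills up, take the page back off the earlier bios
                      * and submit what we have so far.
                      */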
2688                 for (i = 0; i < conf->raid_disks * 2; i++) {
2689                         bio = r1_bio->bios[i];
2690                         if (bio->bi_end_io) {
2691                                 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2692                                 if (bio_add_page(bio, page, len, 0) == 0) {
2693                                         /* stop here */
2694                                         bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2695                                         while (i > 0) {
2696                                                 i--;
2697                                                 bio = r1_bio->bios[i];
2698                                                 if (bio->bi_end_io == NULL)
2699                                                         continue;
2700                                                 /* remove last page from this bio */
2701                                                 bio->bi_vcnt--;
2702                                                 bio->bi_size -= len;
2703                                                 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
2704                                         }
2705                                         goto bio_full;
2706                                 }
2707                         }
2708                 }
2709                 nr_sectors += len>>9;
2710                 sector_nr += len>>9;
2711                 sync_blocks -= (len>>9);
2712         } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2713  bio_full:
2714         r1_bio->sectors = nr_sectors;
2715
2716         /* For a user-requested sync, we read all readable devices and do a
2717          * compare
2718          */
2719         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2720                 atomic_set(&r1_bio->remaining, read_targets);
2721                 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2722                         bio = r1_bio->bios[i];
2723                         if (bio->bi_end_io == end_sync_read) {
2724                                 read_targets--;
2725                                 md_sync_acct(bio->bi_bdev, nr_sectors);
2726                                 generic_make_request(bio);
2727                         }
2728                 }
2729         } else {
2730                 atomic_set(&r1_bio->remaining, 1);
2731                 bio = r1_bio->bios[r1_bio->read_disk];
2732                 md_sync_acct(bio->bi_bdev, nr_sectors);
2733                 generic_make_request(bio);
2734
2735         }
2736         return nr_sectors;
2737 }
2738
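     /* A RAID1 array provides the capacity of a single member device,
      * so the array size is just the per-device data size.
      */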
2739 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2740 {
2741         if (sectors)
2742                 return sectors;
2743
2744         return mddev->dev_sectors;
2745 }
2746
2747 static struct r1conf *setup_conf(struct mddev *mddev)
2748 {
2749         struct r1conf *conf;
2750         int i;
2751         struct raid1_info *disk;
2752         struct md_rdev *rdev;
2753         int err = -ENOMEM;
2754
2755         conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2756         if (!conf)
2757                 goto abort;
2758
2759         conf->mirrors = kzalloc(sizeof(struct raid1_info)
2760                                 * mddev->raid_disks * 2,
2761                                  GFP_KERNEL);
2762         if (!conf->mirrors)
2763                 goto abort;
2764
2765         conf->tmppage = alloc_page(GFP_KERNEL);
2766         if (!conf->tmppage)
2767                 goto abort;
2768
2769         conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2770         if (!conf->poolinfo)
2771                 goto abort;
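             /* Arrays are sized for twice raid_disks: the lower half
              * holds the active mirrors, the upper half any replacement
              * devices.
              */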
2772         conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2773         conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2774                                           r1bio_pool_free,
2775                                           conf->poolinfo);
2776         if (!conf->r1bio_pool)
2777                 goto abort;
2778
2779         conf->poolinfo->mddev = mddev;
2780
2781         err = -EINVAL;
2782         spin_lock_init(&conf->device_lock);
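             /* Slot each rdev into conf->mirrors; a replacement device
              * goes into the upper half, at raid_disks + disk_idx.
              */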
2783         rdev_for_each(rdev, mddev) {
2784                 struct request_queue *q;
2785                 int disk_idx = rdev->raid_disk;
2786                 if (disk_idx >= mddev->raid_disks
2787                     || disk_idx < 0)
2788                         continue;
2789                 if (test_bit(Replacement, &rdev->flags))
2790                         disk = conf->mirrors + mddev->raid_disks + disk_idx;
2791                 else
2792                         disk = conf->mirrors + disk_idx;
2793
2794                 if (disk->rdev)
2795                         goto abort;
2796                 disk->rdev = rdev;
2797                 q = bdev_get_queue(rdev->bdev);
2798                 if (q->merge_bvec_fn)
2799                         mddev->merge_check_needed = 1;
2800
2801                 disk->head_position = 0;
2802                 disk->seq_start = MaxSector;
2803         }
2804         conf->raid_disks = mddev->raid_disks;
2805         conf->mddev = mddev;
2806         INIT_LIST_HEAD(&conf->retry_list);
2807
2808         spin_lock_init(&conf->resync_lock);
2809         init_waitqueue_head(&conf->wait_barrier);
2810
2811         bio_list_init(&conf->pending_bio_list);
2812         conf->pending_count = 0;
2813         conf->recovery_disabled = mddev->recovery_disabled - 1;
2814
2815         conf->start_next_window = MaxSector;
2816         conf->current_window_requests = conf->next_window_requests = 0;
2817
2818         err = -EIO;
2819         for (i = 0; i < conf->raid_disks * 2; i++) {
2820
2821                 disk = conf->mirrors + i;
2822
2823                 if (i < conf->raid_disks &&
2824                     disk[conf->raid_disks].rdev) {
2825                         /* This slot has a replacement. */
2826                         if (!disk->rdev) {
2827                                 /* No original, just make the replacement
2828                                  * a recovering spare
2829                                  */
2830                                 disk->rdev =
2831                                         disk[conf->raid_disks].rdev;
2832                                 disk[conf->raid_disks].rdev = NULL;
2833                         } else if (!test_bit(In_sync, &disk->rdev->flags))
2834                                 /* Original is not in_sync - bad */
2835                                 goto abort;
2836                 }
2837
2838                 if (!disk->rdev ||
2839                     !test_bit(In_sync, &disk->rdev->flags)) {
2840                         disk->head_position = 0;
2841                         if (disk->rdev &&
2842                             (disk->rdev->saved_raid_disk < 0))
2843                                 conf->fullsync = 1;
2844                 }
2845         }
2846
2847         err = -ENOMEM;
2848         conf->thread = md_register_thread(raid1d, mddev, "raid1");
2849         if (!conf->thread) {
2850                 printk(KERN_ERR
2851                        "md/raid1:%s: couldn't allocate thread\n",
2852                        mdname(mddev));
2853                 goto abort;
2854         }
2855
2856         return conf;
2857
2858  abort:
2859         if (conf) {
2860                 if (conf->r1bio_pool)
2861                         mempool_destroy(conf->r1bio_pool);
2862                 kfree(conf->mirrors);
2863                 safe_put_page(conf->tmppage);
2864                 kfree(conf->poolinfo);
2865                 kfree(conf);
2866         }
2867         return ERR_PTR(err);
2868 }
2869
2870 static int stop(struct mddev *mddev);
2871 static int run(struct mddev *mddev)
2872 {
2873         struct r1conf *conf;
2874         int i;
2875         struct md_rdev *rdev;
2876         int ret;
2877         bool discard_supported = false;
2878
2879         if (mddev->level != 1) {
2880                 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2881                        mdname(mddev), mddev->level);
2882                 return -EIO;
2883         }
2884         if (mddev->reshape_position != MaxSector) {
2885                 printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2886                        mdname(mddev));
2887                 return -EIO;
2888         }
2889         /*
2890          * copy the already verified devices into our private RAID1
2891          * bookkeeping area. [whatever we allocate in run(),
2892          * bookkeeping area. [whatever we allocate in run()
2893          */
2894         if (mddev->private == NULL)
2895                 conf = setup_conf(mddev);
2896         else
2897                 conf = mddev->private;
2898
2899         if (IS_ERR(conf))
2900                 return PTR_ERR(conf);
2901
2902         if (mddev->queue)
2903                 blk_queue_max_write_same_sectors(mddev->queue, 0);
2904
2905         rdev_for_each(rdev, mddev) {
2906                 if (!mddev->gendisk)
2907                         continue;
2908                 disk_stack_limits(mddev->gendisk, rdev->bdev,
2909                                   rdev->data_offset << 9);
2910                 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
2911                         discard_supported = true;
2912         }
2913
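             /* Count every missing, out-of-sync or faulty slot as degraded. */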
2914         mddev->degraded = 0;
2915         for (i = 0; i < conf->raid_disks; i++)
2916                 if (conf->mirrors[i].rdev == NULL ||
2917                     !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2918                     test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2919                         mddev->degraded++;
2920
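             /* With only one working device there is no peer left to
              * resync with, so the array can be treated as clean.
              */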
2921         if (conf->raid_disks - mddev->degraded == 1)
2922                 mddev->recovery_cp = MaxSector;
2923
2924         if (mddev->recovery_cp != MaxSector)
2925                 printk(KERN_NOTICE "md/raid1:%s: not clean"
2926                        " -- starting background reconstruction\n",
2927                        mdname(mddev));
2928         printk(KERN_INFO
2929                 "md/raid1:%s: active with %d out of %d mirrors\n",
2930                 mdname(mddev), mddev->raid_disks - mddev->degraded,
2931                 mddev->raid_disks);
2932
2933         /*
2934          * Ok, everything is just fine now
2935          */
2936         mddev->thread = conf->thread;
2937         conf->thread = NULL;
2938         mddev->private = conf;
2939
2940         md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2941
2942         if (mddev->queue) {
2943                 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2944                 mddev->queue->backing_dev_info.congested_data = mddev;
2945                 blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
2946
2947                 if (discard_supported)
2948                         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
2949                                                 mddev->queue);
2950                 else
2951                         queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
2952                                                   mddev->queue);
2953         }
2954
2955         ret = md_integrity_register(mddev);
2956         if (ret)
2957                 stop(mddev);
2958         return ret;
2959 }
2960
2961 static int stop(struct mddev *mddev)
2962 {
2963         struct r1conf *conf = mddev->private;
2964         struct bitmap *bitmap = mddev->bitmap;
2965
2966         /* wait for behind writes to complete */
2967         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2968                 printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
2969                        mdname(mddev));
2970                 /* need to kick something here to make sure I/O goes? */
2971                 wait_event(bitmap->behind_wait,
2972                            atomic_read(&bitmap->behind_writes) == 0);
2973         }
2974
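             /* A freeze/unfreeze cycle lets any I/O still in flight
              * drain before the conf structures are torn down.
              */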
2975         freeze_array(conf, 0);
2976         unfreeze_array(conf);
2977
2978         md_unregister_thread(&mddev->thread);
2979         if (conf->r1bio_pool)
2980                 mempool_destroy(conf->r1bio_pool);
2981         kfree(conf->mirrors);
2982         safe_put_page(conf->tmppage);
2983         kfree(conf->poolinfo);
2984         kfree(conf);
2985         mddev->private = NULL;
2986         return 0;
2987 }
2988
2989 static int raid1_resize(struct mddev *mddev, sector_t sectors)
2990 {
2991         /* no resync is happening, and there is enough space
2992          * on all devices, so we can resize.
2993          * We need to make sure resync covers any new space.
2994          * If the array is shrinking we should possibly wait until
2995          * any io in the removed space completes, but it hardly seems
2996          * worth it.
2997          */
2998         sector_t newsize = raid1_size(mddev, sectors, 0);
2999         if (mddev->external_size &&
3000             mddev->array_sectors > newsize)
3001                 return -EINVAL;
3002         if (mddev->bitmap) {
3003                 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
3004                 if (ret)
3005                         return ret;
3006         }
3007         md_set_array_sectors(mddev, newsize);
3008         set_capacity(mddev->gendisk, mddev->array_sectors);
3009         revalidate_disk(mddev->gendisk);
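             /* If the array grew, resync must cover the newly added space. */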
3010         if (sectors > mddev->dev_sectors &&
3011             mddev->recovery_cp > mddev->dev_sectors) {
3012                 mddev->recovery_cp = mddev->dev_sectors;
3013                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3014         }
3015         mddev->dev_sectors = sectors;
3016         mddev->resync_max_sectors = sectors;
3017         return 0;
3018 }
3019
3020 static int raid1_reshape(struct mddev *mddev)
3021 {
3022         /* We need to:
3023          * 1/ resize the r1bio_pool
3024          * 2/ resize conf->mirrors
3025          *
3026          * We allocate a new r1bio_pool if we can.
3027          * Then raise a device barrier and wait until all IO stops.
3028          * Then resize conf->mirrors and swap in the new r1bio pool.
3029          *
3030          * At the same time, we "pack" the devices so that all the missing
3031          * devices have the higher raid_disk numbers.
3032          */
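             /* This is the ->check_reshape hook, reached e.g. when
              * userspace runs "mdadm --grow --raid-devices=N /dev/mdX"
              * on a raid1 array.
              */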
3033         mempool_t *newpool, *oldpool;
3034         struct pool_info *newpoolinfo;
3035         struct raid1_info *newmirrors;
3036         struct r1conf *conf = mddev->private;
3037         int cnt, raid_disks;
3038         unsigned long flags;
3039         int d, d2, err;
3040
3041         /* Cannot change chunk_size, layout, or level */
3042         if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3043             mddev->layout != mddev->new_layout ||
3044             mddev->level != mddev->new_level) {
3045                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3046                 mddev->new_layout = mddev->layout;
3047                 mddev->new_level = mddev->level;
3048                 return -EINVAL;
3049         }
3050
3051         err = md_allow_write(mddev);
3052         if (err)
3053                 return err;
3054
3055         raid_disks = mddev->raid_disks + mddev->delta_disks;
3056
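             /* When shrinking, the number of devices still in use must
              * not exceed the new raid_disks count; otherwise the
              * reshape would drop an active member.
              */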
3057         if (raid_disks < conf->raid_disks) {
3058                 cnt = 0;
3059                 for (d = 0; d < conf->raid_disks; d++)
3060                         if (conf->mirrors[d].rdev)
3061                                 cnt++;
3062                 if (cnt > raid_disks)
3063                         return -EBUSY;
3064         }
3065
3066         newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3067         if (!newpoolinfo)
3068                 return -ENOMEM;
3069         newpoolinfo->mddev = mddev;
3070         newpoolinfo->raid_disks = raid_disks * 2;
3071
3072         newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
3073                                  r1bio_pool_free, newpoolinfo);
3074         if (!newpool) {
3075                 kfree(newpoolinfo);
3076                 return -ENOMEM;
3077         }
3078         newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
3079                              GFP_KERNEL);
3080         if (!newmirrors) {
3081                 kfree(newpoolinfo);
3082                 mempool_destroy(newpool);
3083                 return -ENOMEM;
3084         }
3085
3086         freeze_array(conf, 0);
3087
3088         /* ok, everything is stopped */
3089         oldpool = conf->r1bio_pool;
3090         conf->r1bio_pool = newpool;
3091
3092         for (d = d2 = 0; d < conf->raid_disks; d++) {
3093                 struct md_rdev *rdev = conf->mirrors[d].rdev;
3094                 if (rdev && rdev->raid_disk != d2) {
3095                         sysfs_unlink_rdev(mddev, rdev);
3096                         rdev->raid_disk = d2;
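                             /* raid_disk has changed, so unlink again to
                              * remove any stale link under the new number
                              * before re-linking below.
                              */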
3097                         sysfs_unlink_rdev(mddev, rdev);
3098                         if (sysfs_link_rdev(mddev, rdev))
3099                                 printk(KERN_WARNING
3100                                        "md/raid1:%s: cannot register rd%d\n",
3101                                        mdname(mddev), rdev->raid_disk);
3102                 }
3103                 if (rdev)
3104                         newmirrors[d2++].rdev = rdev;
3105         }
3106         kfree(conf->mirrors);
3107         conf->mirrors = newmirrors;
3108         kfree(conf->poolinfo);
3109         conf->poolinfo = newpoolinfo;
3110
3111         spin_lock_irqsave(&conf->device_lock, flags);
3112         mddev->degraded += (raid_disks - conf->raid_disks);
3113         spin_unlock_irqrestore(&conf->device_lock, flags);
3114         conf->raid_disks = mddev->raid_disks = raid_disks;
3115         mddev->delta_disks = 0;
3116
3117         unfreeze_array(conf);
3118
3119         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3120         md_wakeup_thread(mddev->thread);
3121
3122         mempool_destroy(oldpool);
3123         return 0;
3124 }
3125
3126 static void raid1_quiesce(struct mddev *mddev, int state)
3127 {
3128         struct r1conf *conf = mddev->private;
3129
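             /* state 1 freezes all array I/O and 0 thaws it again;
              * state 2 just wakes any barrier waiters while the array
              * is suspended.
              */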
3130         switch (state) {
3131         case 2: /* wake for suspend */
3132                 wake_up(&conf->wait_barrier);
3133                 break;
3134         case 1:
3135                 freeze_array(conf, 0);
3136                 break;
3137         case 0:
3138                 unfreeze_array(conf);
3139                 break;
3140         }
3141 }
3142
3143 static void *raid1_takeover(struct mddev *mddev)
3144 {
3145         /* raid1 can take over:
3146          *  raid5 with 2 devices, any layout or chunk size
3147          */
3148         if (mddev->level == 5 && mddev->raid_disks == 2) {
3149                 struct r1conf *conf;
3150                 mddev->new_level = 1;
3151                 mddev->new_layout = 0;
3152                 mddev->new_chunk_sectors = 0;
3153                 conf = setup_conf(mddev);
3154                 if (!IS_ERR(conf))
3155                         /* Array must appear to be quiesced */
3156                         conf->array_frozen = 1;
3157                 return conf;
3158         }
3159         return ERR_PTR(-EINVAL);
3160 }
3161
3162 static struct md_personality raid1_personality =
3163 {
3164         .name           = "raid1",
3165         .level          = 1,
3166         .owner          = THIS_MODULE,
3167         .make_request   = make_request,
3168         .run            = run,
3169         .stop           = stop,
3170         .status         = status,
3171         .error_handler  = error,
3172         .hot_add_disk   = raid1_add_disk,
3173         .hot_remove_disk= raid1_remove_disk,
3174         .spare_active   = raid1_spare_active,
3175         .sync_request   = sync_request,
3176         .resize         = raid1_resize,
3177         .size           = raid1_size,
3178         .check_reshape  = raid1_reshape,
3179         .quiesce        = raid1_quiesce,
3180         .takeover       = raid1_takeover,
3181 };
3182
3183 static int __init raid_init(void)
3184 {
3185         return register_md_personality(&raid1_personality);
3186 }
3187
3188 static void raid_exit(void)
3189 {
3190         unregister_md_personality(&raid1_personality);
3191 }
3192
3193 module_init(raid_init);
3194 module_exit(raid_exit);
3195 MODULE_LICENSE("GPL");
3196 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3197 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3198 MODULE_ALIAS("md-raid1");
3199 MODULE_ALIAS("md-level-1");
3200
3201 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);