patch-1.3.69 linux/drivers/block/raid0.c
Next file: linux/drivers/cdrom/optcd.c
Previous file: linux/drivers/block/md.c
Back to the patch index
Back to the overall index
- Lines: 342
- Date:
Mon Feb 26 13:51:45 1996
- Orig file:
v1.3.68/linux/drivers/block/raid0.c
- Orig date:
Thu Jan 1 02:00:00 1970
diff -u --recursive --new-file v1.3.68/linux/drivers/block/raid0.c linux/drivers/block/raid0.c
@@ -0,0 +1,341 @@
+
+/*
+ raid0.c : Multiple Devices driver for Linux
+ Copyright (C) 1994-96 Marc ZYNGIER
+ <zyngier@ufr-info-p7.ibp.fr> or
+ <maz@gloups.fdn.fr>
+
+ RAID-0 management functions.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/md.h>
+#include <linux/raid0.h>
+#include <linux/malloc.h>
+
+#define MAJOR_NR MD_MAJOR
+#define MD_DRIVER
+#define MD_PERSONALITY
+
+#include <linux/blk.h>
+
+/*
+ * Build the strip-zone table for md unit 'minor'.
+ *
+ * Member devices of different sizes are grouped into "zones": zone 0
+ * stripes across all devices up to the capacity of the smallest one,
+ * the next zone stripes across the remaining space of the larger
+ * devices, and so on.  Fills in data->nr_strip_zones,
+ * data->strip_zone[] and data->smallest (the zone with the fewest
+ * blocks, later used as the hash-slot granularity by raid0_run).
+ *
+ * NOTE(review): the kmalloc() result is not checked for NULL.
+ */
+static void create_strip_zones (int minor, struct md_dev *mddev)
+{
+ int i, j, c=0;
+ int current_offset=0;
+ struct real_dev *smallest_by_zone;
+ struct raid0_data *data=(struct raid0_data *) mddev->private;
+
+ /* One zone per distinct device size: device i adds a zone only if
+    no earlier device j has the same size. */
+ data->nr_strip_zones=1;
+
+ for (i=1; i<mddev->nb_dev; i++)
+ {
+ for (j=0; j<i; j++)
+ if (devices[minor][i].size==devices[minor][j].size)
+ {
+ c=1;
+ break;
+ }
+
+ if (!c)
+ data->nr_strip_zones++;
+
+ c=0;
+ }
+
+ /* NOTE(review): allocation failure is not handled here */
+ data->strip_zone=kmalloc (sizeof(struct strip_zone)*data->nr_strip_zones,
+ GFP_KERNEL);
+
+ data->smallest=NULL;
+
+ /* Build each zone: it starts at the offset where the previous zone
+    ended (current_offset) and includes every device that still has
+    blocks beyond that offset. */
+ for (i=0; i<data->nr_strip_zones; i++)
+ {
+ data->strip_zone[i].dev_offset=current_offset;
+ smallest_by_zone=NULL;
+ c=0;
+
+ for (j=0; j<mddev->nb_dev; j++)
+ if (devices[minor][j].size>current_offset)
+ {
+ data->strip_zone[i].dev[c++]=devices[minor]+j;
+ if (!smallest_by_zone ||
+ smallest_by_zone->size > devices[minor][j].size)
+ smallest_by_zone=devices[minor]+j;
+ }
+
+ /* Zone ends at its smallest member device; usable size is the
+    per-device span multiplied by the number of members. */
+ data->strip_zone[i].nb_dev=c;
+ data->strip_zone[i].size=(smallest_by_zone->size-current_offset)*c;
+
+ /* Track the zone with the fewest blocks overall. */
+ if (!data->smallest ||
+ data->smallest->size > data->strip_zone[i].size)
+ data->smallest=data->strip_zone+i;
+
+ /* zone_offset = first logical md block served by this zone. */
+ data->strip_zone[i].zone_offset=i ? (data->strip_zone[i-1].zone_offset+
+ data->strip_zone[i-1].size) : 0;
+ current_offset=smallest_by_zone->size;
+ }
+}
+
+/*
+ * Start the RAID-0 personality on md unit 'minor'.
+ *
+ * Rejects any member device smaller than one chunk, rounds every
+ * device size down to a whole number of chunks, builds the strip
+ * zones, then builds the hash table that raid0_map uses for O(1)
+ * block-to-zone lookup.  Each hash slot covers data->smallest->size
+ * blocks and therefore spans at most two zones (zone0 / zone1).
+ *
+ * Returns 0 on success, -EINVAL if a device is too small.
+ *
+ * NOTE(review): neither kmalloc() result is checked for NULL.
+ */
+static int raid0_run (int minor, struct md_dev *mddev)
+{
+ int cur=0, i=0, size, zone0_size, nb_zone, min;
+ struct raid0_data *data;
+
+ /* min = chunk size (in 1K blocks) derived from the md factor. */
+ min=1 << FACTOR_SHIFT(FACTOR(mddev));
+
+ for (i=0; i<mddev->nb_dev; i++)
+ if (devices[minor][i].size<min)
+ {
+ printk ("Cannot use %dk chunks on dev %s\n", min,
+ partition_name (devices[minor][i].dev));
+ return -EINVAL;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ /* Resize devices according to the factor */
+ md_size[minor]=0;
+
+ for (i=0; i<mddev->nb_dev; i++)
+ {
+ devices[minor][i].size &= ~((1 << FACTOR_SHIFT(FACTOR(mddev))) - 1);
+ md_size[minor] += devices[minor][i].size;
+ }
+
+ /* NOTE(review): allocation failure is not handled here */
+ mddev->private=kmalloc (sizeof (struct raid0_data), GFP_KERNEL);
+ data=(struct raid0_data *) mddev->private;
+
+ create_strip_zones (minor, mddev);
+
+ /* Number of hash slots = ceil(total size / smallest zone size). */
+ nb_zone=data->nr_zones=
+ md_size[minor]/data->smallest->size +
+ (md_size[minor]%data->smallest->size ? 1 : 0);
+
+ data->hash_table=kmalloc (sizeof (struct raid0_hash)*nb_zone, GFP_KERNEL);
+
+ /* Walk the zones, carving them into hash slots of
+    data->smallest->size blocks each; 'size' tracks how much of the
+    current zone is still unassigned. */
+ size=data->strip_zone[cur].size;
+
+ i=0;
+ while (cur<data->nr_strip_zones)
+ {
+ data->hash_table[i].zone0=data->strip_zone+cur;
+
+ if (size>=data->smallest->size)/* If we completely fill the slot */
+ {
+ data->hash_table[i++].zone1=NULL;
+ size-=data->smallest->size;
+
+ if (!size)
+ {
+ if (++cur==data->nr_strip_zones) continue;
+ size=data->strip_zone[cur].size;
+ }
+
+ continue;
+ }
+
+ if (++cur==data->nr_strip_zones) /* Last zone: no zone1 for this slot */
+ {
+ data->hash_table[i].zone1=NULL;
+ continue;
+ }
+
+ zone0_size=size; /* Here, we use a 2nd zone to fill the slot */
+ size=data->strip_zone[cur].size;
+ data->hash_table[i++].zone1=data->strip_zone+cur;
+ size-=(data->smallest->size - zone0_size);
+ }
+
+ return (0);
+}
+
+
+/*
+ * Stop the RAID-0 personality on md unit 'minor': release the hash
+ * table, the strip-zone array and the per-array raid0_data allocated
+ * by raid0_run/create_strip_zones, then drop the module use count.
+ * Always returns 0.
+ */
+static int raid0_stop (int minor, struct md_dev *mddev)
+{
+ struct raid0_data *data=(struct raid0_data *) mddev->private;
+
+ kfree (data->hash_table);
+ kfree (data->strip_zone);
+ kfree (data);
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * FIXME - We assume some things here :
+ * - requested buffers NEVER bigger than chunk size,
+ * - requested buffers NEVER cross stripes limits.
+ * Of course, those facts may not be valid anymore (and surely won't...)
+ * Hey guys, there's some work out there ;-)
+ */
+/*
+ * Translate a request on the md device into requests on the member
+ * devices.  For each 1K block: look up its hash slot (slot index =
+ * block / smallest-zone-size), pick zone0 or zone1 from the slot,
+ * then compute the target device and the block offset on it from the
+ * chunk geometry.
+ *
+ * Paging requests (req->sem set) are redirected wholesale via
+ * add_request().  Buffer requests are taken apart buffer_head by
+ * buffer_head and accumulated into the per-target-queue 'pending'
+ * array, which is finally submitted through make_md_request().
+ * Returns REDIRECTED_REQ in both cases, since the original request
+ * is either re-queued or marked inactive here.
+ *
+ * NOTE(review): 'hash' and 'pending' are static, i.e. shared across
+ * calls -- presumably safe under this kernel's serialized request
+ * handling, but worth confirming.
+ */
+static int raid0_map (int minor, struct md_dev *mddev, struct request *req)
+{
+ struct raid0_data *data=(struct raid0_data *) mddev->private;
+ static struct raid0_hash *hash;
+ struct strip_zone *zone;
+ struct real_dev *tmp_dev;
+ int i, queue, blk_in_chunk, factor, chunk;
+ long block, rblock;
+ struct buffer_head *bh;
+ static struct request pending[MAX_REAL]={{0, }, };
+
+ factor=FACTOR(mddev);
+
+ while (req->bh || req->sem)
+ {
+ /* 512-byte sectors -> 1K blocks. */
+ block=req->sector >> 1;
+ hash=data->hash_table+(block/data->smallest->size);
+
+ /* Past the end of zone0's range => the block lives in zone1. */
+ if (block >= (hash->zone0->size +
+ hash->zone0->zone_offset))
+ {
+ if (!hash->zone1)
+ printk ("raid0_map : hash->zone1==NULL for block %ld\n", block);
+ zone=hash->zone1;
+ }
+ else
+ zone=hash->zone0;
+
+ /* Position within the chunk, chunk index within this device,
+    target device (round-robin by chunk), and the resulting block
+    number on that device. */
+ blk_in_chunk=block & ((1UL << FACTOR_SHIFT(factor)) - 1);
+ chunk=(block - zone->zone_offset) / (zone->nb_dev<<FACTOR_SHIFT(factor));
+ tmp_dev=zone->dev[(block >> FACTOR_SHIFT(factor)) % zone->nb_dev];
+ rblock=(chunk << FACTOR_SHIFT(factor)) + blk_in_chunk + zone->dev_offset;
+
+ if (req->sem) /* This is a paging request */
+ {
+ req->rq_dev=tmp_dev->dev;
+ req->sector=rblock << 1;
+ add_request (blk_dev+MAJOR (tmp_dev->dev), req);
+
+ return REDIRECTED_REQ;
+ }
+
+ queue=tmp_dev - devices[minor];
+
+ /* This is a buffer request: peel buffer_heads off 'req' until the
+    chunk boundary (or the request) is exhausted. */
+ for (i=blk_in_chunk;
+ i<(1UL << FACTOR_SHIFT(factor)) && req->bh;
+ i+=bh->b_size >> 10)
+ {
+ bh=req->bh;
+ if (!buffer_locked(bh))
+ printk("md%d: block %ld not locked\n", minor, bh->b_blocknr);
+
+ bh->b_rdev=tmp_dev->dev;
+#if defined (CONFIG_MD_SUPPORT_RAID1)
+ bh->b_reqshared=NULL;
+ bh->b_sister_req=NULL;
+#endif
+
+ /* Start a new pending request for this queue, or append the
+    buffer_head to the one already being built. */
+ if (!pending[queue].bh)
+ {
+ pending[queue].rq_dev=tmp_dev->dev;
+ pending[queue].bhtail=pending[queue].bh=bh;
+ pending[queue].sector=rblock << 1;
+ pending[queue].cmd=req->cmd;
+ pending[queue].current_nr_sectors=
+ pending[queue].nr_sectors=bh->b_size >> 9;
+ }
+ else
+ {
+ pending[queue].bhtail->b_reqnext=bh;
+ pending[queue].bhtail=bh;
+ pending[queue].nr_sectors+=bh->b_size >> 9;
+ }
+
+ end_redirect (req); /* Separate bh from the request */
+ }
+ }
+
+ /* Original request is fully dismantled: free it and submit the
+    accumulated per-device requests. */
+ req->rq_status=RQ_INACTIVE;
+ wake_up (&wait_for_request);
+ make_md_request (pending, mddev->nb_dev);
+ return REDIRECTED_REQ; /* Since we already set the request free */
+
+
+/*
+ * Append RAID-0 status text to 'page' (for /proc-style output) and
+ * return the number of bytes written.  Note that the '#undef
+ * MD_DEBUG' immediately below disables the entire debug dump, so as
+ * shipped this function writes nothing and returns 0.
+ */
+static int raid0_status (char *page, int minor, struct md_dev *mddev)
+{
+ int sz=0;
+#undef MD_DEBUG
+#ifdef MD_DEBUG
+ int j, k;
+ struct raid0_data *data=(struct raid0_data *) mddev->private;
+
+ /* One "[z0/z1]" entry per hash slot. */
+ sz+=sprintf (page+sz, " ");
+ for (j=0; j<data->nr_zones; j++)
+ {
+ sz+=sprintf (page+sz, "[z%d",
+ data->hash_table[j].zone0-data->strip_zone);
+ if (data->hash_table[j].zone1)
+ sz+=sprintf (page+sz, "/z%d] ",
+ data->hash_table[j].zone1-data->strip_zone);
+ else
+ sz+=sprintf (page+sz, "] ");
+ }
+
+ sz+=sprintf (page+sz, "\n");
+
+ /* Per-zone member list plus zone/device offsets and size. */
+ for (j=0; j<data->nr_strip_zones; j++)
+ {
+ sz+=sprintf (page+sz, " z%d=[", j);
+ for (k=0; k<data->strip_zone[j].nb_dev; k++)
+ sz+=sprintf (page+sz, "%s/",
+ partition_name(data->strip_zone[j].dev[k]->dev));
+ sz--;
+ sz+=sprintf (page+sz, "] zo=%d do=%d s=%d\n",
+ data->strip_zone[j].zone_offset,
+ data->strip_zone[j].dev_offset,
+ data->strip_zone[j].size);
+ }
+#endif
+ return sz;
+}
+
+
+/*
+ * RAID-0 personality descriptor registered with the md core.
+ * Positional initializer; field meanings per struct md_personality
+ * (declared in linux/md.h, not visible in this file).
+ */
+static struct md_personality raid0_personality=
+{
+ "raid0", /* personality name */
+ raid0_map, /* request mapping/redirection */
+ raid0_run, /* start */
+ raid0_stop, /* stop */
+ raid0_status, /* /proc status output */
+ NULL, /* no ioctls */
+ 0 /* NOTE(review): trailing field meaning not visible here -- see struct md_personality */
+};
+
+
+#ifndef MODULE
+
+/* Built-in (non-module) initialization: register the RAID-0
+   personality with the md core. */
+void raid0_init (void)
+{
+ register_md_personality (RAID0, &raid0_personality);
+}
+
+#else
+
+/* Module entry point: register the RAID-0 personality; returns the
+   md core's result (0 on success). */
+int init_module (void)
+{
+ return (register_md_personality (RAID0, &raid0_personality));
+}
+
+/* Module exit point: refuse to unregister while the personality is
+   still in use (an active array holds the use count). */
+void cleanup_module (void)
+{
+ if (MOD_IN_USE)
+ printk ("md raid0 : module still busy...\n");
+ else
+ unregister_md_personality (RAID0);
+}
+
+#endif
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov
with Sam's (original) version of this