From 0245423ad8e92e117cd3b11f9a4c86428f0b5f55 Mon Sep 17 00:00:00 2001 From: marcel Date: Wed, 7 Feb 2007 18:55:31 +0000 Subject: Evolve the ctlreq interface added to geom_gpt into a generic partitioning class that supports multiple schemes. Current schemes supported are APM (Apple Partition Map) and GPT. Change all GEOM_APPLE and GEOM_GPT options into GEOM_PART_APM and GEOM_PART_GPT (resp). The ctlreq interface supports verbs to create and destroy partitioning schemes on a disk; to add, delete and modify partitions; and to commit or undo changes made. --- sys/geom/geom_apple.c | 263 --------- sys/geom/geom_gpt.c | 1356 ------------------------------------------- sys/geom/part/g_part.c | 1385 ++++++++++++++++++++++++++++++++++++++++++++ sys/geom/part/g_part.h | 130 +++++ sys/geom/part/g_part_apm.c | 414 +++++++++++++ sys/geom/part/g_part_gpt.c | 736 +++++++++++++++++++++++ sys/geom/part/g_part_if.m | 117 ++++ 7 files changed, 2782 insertions(+), 1619 deletions(-) delete mode 100644 sys/geom/geom_apple.c delete mode 100644 sys/geom/geom_gpt.c create mode 100644 sys/geom/part/g_part.c create mode 100644 sys/geom/part/g_part.h create mode 100644 sys/geom/part/g_part_apm.c create mode 100644 sys/geom/part/g_part_gpt.c create mode 100644 sys/geom/part/g_part_if.m (limited to 'sys/geom') diff --git a/sys/geom/geom_apple.c b/sys/geom/geom_apple.c deleted file mode 100644 index cfb53e8..0000000 --- a/sys/geom/geom_apple.c +++ /dev/null @@ -1,263 +0,0 @@ -/*- - * Copyright (c) 2002 Peter Grehan. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * GEOM module for Apple Partition Maps - * As described in 'Inside Macintosh Vol 3: About the SCSI Manager - - * The Structure of Block Devices" - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define APPLE_CLASS_NAME "APPLE" - -#define NAPMPART 16 /* Max partitions */ - -struct apm_partition { - char am_sig[2]; - u_int32_t am_mapcnt; - u_int32_t am_start; - u_int32_t am_partcnt; - char am_name[32]; - char am_type[32]; -}; - -struct g_apple_softc { - u_int16_t dd_bsiz; - u_int32_t dd_blkcnt; - u_int16_t dd_drvrcnt; - u_int32_t am_mapcnt0; - struct apm_partition apmpart[NAPMPART]; -}; - -static void -g_dec_drvrdesc(u_char *ptr, struct g_apple_softc *sc) -{ - sc->dd_bsiz = be16dec(ptr + 2); - sc->dd_blkcnt = be32dec(ptr + 4); - sc->dd_drvrcnt = be32dec(ptr + 16); -} - -static void -g_dec_apple_partition(u_char *ptr, struct apm_partition *d) -{ - d->am_sig[0] = ptr[0]; - 
d->am_sig[1] = ptr[1]; - d->am_mapcnt = be32dec(ptr + 4); - d->am_start = be32dec(ptr + 8); - d->am_partcnt = be32dec(ptr + 12); - memcpy(d->am_name, ptr + 16, 32); - memcpy(d->am_type, ptr + 48, 32); -} - -static int -g_apple_start(struct bio *bp) -{ - struct g_provider *pp; - struct g_geom *gp; - struct g_slicer *gsp; - - pp = bp->bio_to; - gp = pp->geom; - gsp = gp->softc; - if (bp->bio_cmd == BIO_GETATTR) { - if (g_handleattr_off_t(bp, "APM::offset", - gsp->slices[pp->index].offset)) - return (1); - } - return (0); -} - -static void -g_apple_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, - struct g_consumer *cp __unused, struct g_provider *pp) -{ - struct g_apple_softc *mp; - struct g_slicer *gsp; - - gsp = gp->softc; - mp = gsp->softc; - g_slice_dumpconf(sb, indent, gp, cp, pp); - if (pp != NULL) { - if (indent == NULL) { - sbuf_printf(sb, " ty %s", - mp->apmpart[pp->index].am_type); - if (*mp->apmpart[pp->index].am_name) - sbuf_printf(sb, " sn %s", - mp->apmpart[pp->index].am_name); - } else { - sbuf_printf(sb, "%s%s\n", indent, - mp->apmpart[pp->index].am_name); - sbuf_printf(sb, "%s%s\n", indent, - mp->apmpart[pp->index].am_type); - } - } -} - -#if 0 -static void -g_apple_print() -{ - - /* XXX */ -} -#endif - -static struct g_geom * -g_apple_taste(struct g_class *mp, struct g_provider *pp, int insist) -{ - struct g_geom *gp; - struct g_consumer *cp; - int i; - struct g_apple_softc *ms; - struct apm_partition *apm; - u_int sectorsize; - u_char *buf; - - g_trace(G_T_TOPOLOGY, "apple_taste(%s,%s)", mp->name, pp->name); - g_topology_assert(); - gp = g_slice_new(mp, NAPMPART, pp, &cp, &ms, sizeof *ms, g_apple_start); - if (gp == NULL) - return (NULL); - g_topology_unlock(); - do { - if (gp->rank != 2 && insist == 0) - break; - - sectorsize = cp->provider->sectorsize; - if (sectorsize != 512) - break; - - buf = g_read_data(cp, 0, sectorsize, NULL); - if (buf == NULL) - break; - - /* - * Test for the sector 0 driver record signature, and - * 
validate sector and disk size - */ - if (buf[0] != 'E' && buf[1] != 'R') { - g_free(buf); - break; - } - g_dec_drvrdesc(buf, ms); - g_free(buf); - - if (ms->dd_bsiz != 512) { - break; - } - - /* - * Read in the first partition map - */ - buf = g_read_data(cp, sectorsize, sectorsize, NULL); - if (buf == NULL) - break; - - /* - * Decode the first partition: it's another indication of - * validity, as well as giving the size of the partition - * map - */ - apm = &ms->apmpart[0]; - g_dec_apple_partition(buf, apm); - g_free(buf); - - if (apm->am_sig[0] != 'P' || apm->am_sig[1] != 'M') - break; - ms->am_mapcnt0 = apm->am_mapcnt; - - buf = g_read_data(cp, 2 * sectorsize, - (NAPMPART - 1) * sectorsize, NULL); - if (buf == NULL) - break; - - for (i = 1; i < NAPMPART; i++) { - g_dec_apple_partition(buf + ((i - 1) * sectorsize), - &ms->apmpart[i]); - } - - for (i = 0; i < NAPMPART; i++) { - apm = &ms->apmpart[i]; - - /* - * Validate partition sig and global mapcount - */ - if (apm->am_sig[0] != 'P' || - apm->am_sig[1] != 'M') - continue; - if (apm->am_mapcnt != ms->am_mapcnt0) - continue; - - if (bootverbose) { - printf("APM Slice %d (%s/%s) on %s:\n", - i + 1, apm->am_name, apm->am_type, - gp->name); - /* g_apple_print(i, dp + i); */ - } - g_topology_lock(); - g_slice_config(gp, i, G_SLICE_CONFIG_SET, - (off_t)apm->am_start << 9ULL, - (off_t)apm->am_partcnt << 9ULL, - sectorsize, - "%ss%d", gp->name, i + 1); - g_topology_unlock(); - } - g_free(buf); - break; - } while(0); - g_topology_lock(); - g_access(cp, -1, 0, 0); - if (LIST_EMPTY(&gp->provider)) { - g_slice_spoiled(cp); - return (NULL); - } - return (gp); -} - - -static struct g_class g_apple_class = { - .name = APPLE_CLASS_NAME, - .version = G_VERSION, - .taste = g_apple_taste, - .dumpconf = g_apple_dumpconf, -}; - -DECLARE_GEOM_CLASS(g_apple_class, g_apple); diff --git a/sys/geom/geom_gpt.c b/sys/geom/geom_gpt.c deleted file mode 100644 index bea8c7b..0000000 --- a/sys/geom/geom_gpt.c +++ /dev/null @@ -1,1356 +0,0 @@ 
-/*- - * Copyright (c) 2002, 2005, 2006 Marcel Moolenaar - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -CTASSERT(offsetof(struct gpt_hdr, padding) == 92); -CTASSERT(sizeof(struct gpt_ent) == 128); - -#define G_GPT_TRACE(args) /* g_trace args */ - -/* - * The GEOM GPT class. Nothing fancy... 
- */ -static g_ctl_req_t g_gpt_ctlreq; -static g_ctl_destroy_geom_t g_gpt_destroy_geom; -static g_taste_t g_gpt_taste; - -static g_access_t g_gpt_access; -static g_dumpconf_t g_gpt_dumpconf; -static g_orphan_t g_gpt_orphan; -static g_spoiled_t g_gpt_spoiled; -static g_start_t g_gpt_start; - -static struct g_class g_gpt_class = { - .name = "GPT", - .version = G_VERSION, - /* Class methods. */ - .ctlreq = g_gpt_ctlreq, - .destroy_geom = g_gpt_destroy_geom, - .taste = g_gpt_taste, - /* Geom methods. */ - .access = g_gpt_access, - .dumpconf = g_gpt_dumpconf, - .orphan = g_gpt_orphan, - .spoiled = g_gpt_spoiled, - .start = g_gpt_start, -}; - -DECLARE_GEOM_CLASS(g_gpt_class, g_gpt); - -/* - * The GEOM GPT instance data. - */ -struct g_gpt_part { - LIST_ENTRY(g_gpt_part) parts; - struct g_provider *provider; - off_t offset; - struct gpt_ent ent; - int index; -}; - -enum gpt_hdr_type { - GPT_HDR_PRIMARY, - GPT_HDR_SECONDARY, - GPT_HDR_COUNT -}; - -enum gpt_hdr_state { - GPT_HDR_UNKNOWN, - GPT_HDR_MISSING, - GPT_HDR_CORRUPT, - GPT_HDR_INVALID, - GPT_HDR_OK -}; - -struct g_gpt_softc { - LIST_HEAD(, g_gpt_part) parts; - struct gpt_hdr hdr[GPT_HDR_COUNT]; - enum gpt_hdr_state state[GPT_HDR_COUNT]; -}; - -enum g_gpt_ctl { - G_GPT_CTL_NONE, - G_GPT_CTL_ADD, - G_GPT_CTL_CREATE, - G_GPT_CTL_DESTROY, - G_GPT_CTL_MODIFY, - G_GPT_CTL_RECOVER, - G_GPT_CTL_REMOVE -}; - -static struct uuid g_gpt_freebsd = GPT_ENT_TYPE_FREEBSD; -static struct uuid g_gpt_freebsd_swap = GPT_ENT_TYPE_FREEBSD_SWAP; -static struct uuid g_gpt_linux_swap = GPT_ENT_TYPE_LINUX_SWAP; -static struct uuid g_gpt_unused = GPT_ENT_TYPE_UNUSED; - -/* - * Support functions. 
- */ - -static void g_gpt_wither(struct g_geom *, int); - -static void -g_gpt_ctl_add(struct gctl_req *req, const char *flags, struct g_geom *gp, - struct uuid *type, uint64_t start, uint64_t end, long entry) -{ - char buf[16]; - struct g_provider *pp; - struct g_gpt_softc *softc; - struct g_gpt_part *last, *part; - u_int idx; - - G_GPT_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); - g_topology_assert(); - - pp = LIST_FIRST(&gp->consumer)->provider; - softc = gp->softc; - - last = NULL; - idx = (entry > 0) ? (u_int)(entry - 1) : 0; - LIST_FOREACH(part, &softc->parts, parts) { - if (part->index == idx) { - idx = part->index + 1; - last = part; - } - if ((start >= part->ent.ent_lba_start && - start <= part->ent.ent_lba_end) || - (end >= part->ent.ent_lba_start && - end <= part->ent.ent_lba_end) || - (start < part->ent.ent_lba_start && - end > part->ent.ent_lba_end)) { - gctl_error(req, "%d start/end %jd/%jd", ENOSPC, - (intmax_t)start, (intmax_t)end); - return; - } - } - if (entry > 0 && (long)idx != entry - 1) { - gctl_error(req, "%d entry %ld", EEXIST, entry); - return; - } - snprintf(buf, sizeof(buf), "%u", idx + 1); - gctl_set_param(req, "entry", buf, strlen(buf) + 1); - - part = g_malloc(sizeof(struct g_gpt_part), M_WAITOK | M_ZERO); - part->index = idx; - part->offset = start * pp->sectorsize; - if (last == NULL) - LIST_INSERT_HEAD(&softc->parts, part, parts); - else - LIST_INSERT_AFTER(last, part, parts); - part->ent.ent_type = *type; - kern_uuidgen(&part->ent.ent_uuid, 1); - part->ent.ent_lba_start = start; - part->ent.ent_lba_end = end; - - /* XXX ent_attr */ - /* XXX ent_name */ - - part->provider = g_new_providerf(gp, "%s%c%d", gp->name, - !memcmp(type, &g_gpt_freebsd, sizeof(struct uuid)) ? 's' : 'p', - idx + 1); - part->provider->index = idx; - part->provider->private = part; /* Close the circle. 
*/ - part->provider->mediasize = (end - start + 1) * pp->sectorsize; - part->provider->sectorsize = pp->sectorsize; - part->provider->flags = pp->flags & G_PF_CANDELETE; - if (pp->stripesize > 0) { - part->provider->stripesize = pp->stripesize; - part->provider->stripeoffset = - (pp->stripeoffset + part->offset) % pp->stripesize; - } - g_error_provider(part->provider, 0); - - if (bootverbose) { - printf("GEOM: %s: partition ", part->provider->name); - printf_uuid(&part->ent.ent_uuid); - printf(".\n"); - } -} - -static struct g_geom * -g_gpt_ctl_create(struct gctl_req *req, const char *flags, struct g_class *mp, - struct g_provider *pp, uint32_t entries) -{ - struct uuid uuid; - struct g_consumer *cp; - struct g_geom *gp; - struct g_gpt_softc *softc; - struct gpt_hdr *hdr; - uint64_t last; - size_t tblsz; - int error, i; - - G_GPT_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); - g_topology_assert(); - - tblsz = (entries * sizeof(struct gpt_ent) + pp->sectorsize - 1) / - pp->sectorsize; - - /* - * Sanity-check the size of the provider. This test is very similar - * to the one in g_gpt_taste(). Here we want to make sure that the - * size of the provider is large enough to hold a GPT that has the - * requested number of entries, plus as many available sectors for - * partitions of minimal size. The latter test is not exactly needed - * but it helps keep the table size proportional to the media size. - * Thus, a GPT with 128 entries must at least have 128 sectors of - * usable partition space. Therefore, the absolute minimal size we - * allow is (1 + 2 * (1 + 32) + 128) = 195 sectors. This is more - * restrictive than what g_gpt_taste() requires. - */ - if (pp->sectorsize < 512 || - pp->sectorsize % sizeof(struct gpt_ent) != 0 || - pp->mediasize < (3 + 2 * tblsz + entries) * pp->sectorsize) { - gctl_error(req, "%d provider", ENOSPC); - return (NULL); - } - - /* We don't nest. See also g_gpt_taste(). 
*/ - if (pp->geom->class == &g_gpt_class) { - gctl_error(req, "%d provider", ENODEV); - return (NULL); - } - - /* Create a GEOM. */ - gp = g_new_geomf(mp, "%s", pp->name); - softc = g_malloc(sizeof(struct g_gpt_softc), M_WAITOK | M_ZERO); - gp->softc = softc; - LIST_INIT(&softc->parts); - cp = g_new_consumer(gp); - error = g_attach(cp, pp); - if (error == 0) - error = g_access(cp, 1, 0, 0); - if (error != 0) { - g_gpt_wither(gp, error); - gctl_error(req, "%d geom '%s'", error, pp->name); - return (NULL); - } - - last = (pp->mediasize / pp->sectorsize) - 1; - kern_uuidgen(&uuid, 1); - - /* Construct an in-memory GPT. */ - for (i = GPT_HDR_PRIMARY; i < GPT_HDR_COUNT; i++) { - hdr = softc->hdr + i; - bcopy(GPT_HDR_SIG, hdr->hdr_sig, sizeof(hdr->hdr_sig)); - hdr->hdr_revision = GPT_HDR_REVISION; - hdr->hdr_size = offsetof(struct gpt_hdr, padding); - hdr->hdr_lba_self = (i == GPT_HDR_PRIMARY) ? 1 : last; - hdr->hdr_lba_alt = (i == GPT_HDR_PRIMARY) ? last : 1; - hdr->hdr_lba_start = 2 + tblsz; - hdr->hdr_lba_end = last - (1 + tblsz); - hdr->hdr_uuid = uuid; - hdr->hdr_lba_table = (i == GPT_HDR_PRIMARY) ? 
2 : last - tblsz; - hdr->hdr_entries = entries; - hdr->hdr_entsz = sizeof(struct gpt_ent); - softc->state[i] = GPT_HDR_OK; - } - - if (0) - goto fail; - - if (bootverbose) { - printf("GEOM: %s: GPT ", pp->name); - printf_uuid(&softc->hdr[GPT_HDR_PRIMARY].hdr_uuid); - printf(".\n"); - } - - g_access(cp, -1, 0, 0); - return (gp); - -fail: - g_access(cp, -1, 0, 0); - g_gpt_wither(gp, error); - gctl_error(req, "%d geom '%s'", error, pp->name); - return (NULL); -} - -static void -g_gpt_ctl_destroy(struct gctl_req *req, const char *flags, struct g_geom *gp) -{ -} - -static void -g_gpt_ctl_modify(struct gctl_req *req, const char *flags, struct g_geom *gp, - long entry) -{ -} - -static void -g_gpt_ctl_recover(struct gctl_req *req, const char *flags, struct g_geom *gp) -{ -} - -static void -g_gpt_ctl_remove(struct gctl_req *req, const char *flags, struct g_geom *gp, - long entry) -{ - struct g_provider *pp; - struct g_gpt_softc *softc; - struct g_gpt_part *part; - - G_GPT_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); - g_topology_assert(); - - softc = gp->softc; - - LIST_FOREACH(part, &softc->parts, parts) { - if ((long)part->index == entry - 1) - break; - } - if (part == NULL) { - gctl_error(req, "%d entry %ld", ENOENT, entry); - return; - } - - pp = part->provider; - if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { - gctl_error(req, "%d", EBUSY); - return; - } - - LIST_REMOVE(part, parts); - pp->private = NULL; - g_wither_provider(pp, ENXIO); - g_free(part); -} - -static int -g_gpt_has_pmbr(struct g_consumer *cp, int *error) -{ - char *buf; - uint8_t *typ; - uint64_t mediasize; - int i, pmbr, parts; - uint32_t dp_start, dp_size; - uint16_t magic; - - buf = g_read_data(cp, 0L, cp->provider->sectorsize, error); - if (buf == NULL) - return (0); - - pmbr = 0; - *error = 0; - - magic = le16toh(*(uint16_t *)(uintptr_t)(buf + DOSMAGICOFFSET)); - if (magic != DOSMAGIC) - goto out; - - /* - * Check that there are at least one partition of type - * DOSPTYP_PMBR that covers 
the whole unit. - */ - parts = 0; - mediasize = cp->provider->mediasize / cp->provider->sectorsize; - for (i = 0; i < 4; i++) { - typ = buf + DOSPARTOFF + i * sizeof(struct dos_partition) + - offsetof(struct dos_partition, dp_typ); - if (*typ != 0) - parts++; - if (*typ != DOSPTYP_PMBR) - continue; - - bcopy(buf + DOSPARTOFF + i * sizeof(struct dos_partition) + - offsetof(struct dos_partition, dp_start), &dp_start, - sizeof(dp_start)); - if (le32toh(dp_start) != 1) - break; - - bcopy(buf + DOSPARTOFF + i * sizeof(struct dos_partition) + - offsetof(struct dos_partition, dp_size), &dp_size, - sizeof(dp_size)); - if (le32toh(dp_size) != ~0U && - le32toh(dp_size) != mediasize - 1 && - /* Catch old FreeBSD bug for backward compatibility. */ - le32toh(dp_size) != mediasize) - break; - - pmbr = 1; - } - - /* - * Treat empty MBRs as PMBRs for increased flexibility. Note that an - * invalid entry of type DOSPTYP_PMBR counts towards the number of - * partitions and will prevent the MBR from being treated as a PMBR. 
- */ - if (!pmbr && parts == 0) - pmbr = 1; - -out: - g_free(buf); - return (pmbr); -} - -static void -g_gpt_load_hdr(struct g_gpt_softc *softc, struct g_provider *pp, - enum gpt_hdr_type type, void *buf) -{ - struct uuid uuid; - struct gpt_hdr *hdr; - uint64_t lba, last; - uint32_t crc, sz; - - softc->state[type] = GPT_HDR_MISSING; - - hdr = softc->hdr + type; - bcopy(buf, hdr, sizeof(*hdr)); - if (memcmp(hdr->hdr_sig, GPT_HDR_SIG, sizeof(hdr->hdr_sig)) != 0) - return; - - softc->state[type] = GPT_HDR_CORRUPT; - - sz = le32toh(hdr->hdr_size); - if (sz < 92 || sz > pp->sectorsize) - return; - crc = le32toh(hdr->hdr_crc_self); - hdr->hdr_crc_self = 0; - if (crc32(hdr, sz) != crc) - return; - hdr->hdr_size = sz; - hdr->hdr_crc_self = crc; - - softc->state[type] = GPT_HDR_INVALID; - - last = (pp->mediasize / pp->sectorsize) - 1; - hdr->hdr_revision = le32toh(hdr->hdr_revision); - if (hdr->hdr_revision < 0x00010000) - return; - hdr->hdr_lba_self = le64toh(hdr->hdr_lba_self); - if (hdr->hdr_lba_self != (type == GPT_HDR_PRIMARY ? 1 : last)) - return; - hdr->hdr_lba_alt = le64toh(hdr->hdr_lba_alt); - if (hdr->hdr_lba_alt != (type == GPT_HDR_PRIMARY ? last : 1)) - return; - - /* Check the managed area. */ - hdr->hdr_lba_start = le64toh(hdr->hdr_lba_start); - if (hdr->hdr_lba_start < 2 || hdr->hdr_lba_start >= last) - return; - hdr->hdr_lba_end = le64toh(hdr->hdr_lba_end); - if (hdr->hdr_lba_end < hdr->hdr_lba_start || hdr->hdr_lba_end >= last) - return; - - /* Check the table location and size of the table. 
*/ - hdr->hdr_entries = le32toh(hdr->hdr_entries); - hdr->hdr_entsz = le32toh(hdr->hdr_entsz); - if (hdr->hdr_entries == 0 || hdr->hdr_entsz < 128 || - (hdr->hdr_entsz & 7) != 0) - return; - hdr->hdr_lba_table = le64toh(hdr->hdr_lba_table); - if (hdr->hdr_lba_table < 2 || hdr->hdr_lba_table >= last) - return; - if (hdr->hdr_lba_table >= hdr->hdr_lba_start && - hdr->hdr_lba_table <= hdr->hdr_lba_end) - return; - lba = hdr->hdr_lba_table + - (hdr->hdr_entries * hdr->hdr_entsz + pp->sectorsize - 1) / - pp->sectorsize - 1; - if (lba >= last) - return; - if (lba >= hdr->hdr_lba_start && lba <= hdr->hdr_lba_end) - return; - - softc->state[type] = GPT_HDR_OK; - - le_uuid_dec(&hdr->hdr_uuid, &uuid); - hdr->hdr_uuid = uuid; - hdr->hdr_crc_table = le32toh(hdr->hdr_crc_table); -} - -static void -g_gpt_load_tbl(struct g_geom *gp, struct g_provider *pp, struct gpt_hdr *hdr, - char *tbl) -{ - struct uuid uuid; - struct gpt_ent *ent; - struct g_gpt_part *last, *part; - struct g_gpt_softc *softc; - uint64_t part_start, part_end; - unsigned int ch, idx; - - softc = gp->softc; - - for (idx = 0, last = part = NULL; - idx < hdr->hdr_entries; - idx++, last = part, tbl += hdr->hdr_entsz) { - ent = (struct gpt_ent *)(uintptr_t)tbl; - le_uuid_dec(&ent->ent_type, &uuid); - if (!memcmp(&uuid, &g_gpt_unused, sizeof(struct uuid))) - continue; - part_start = le64toh(ent->ent_lba_start); - part_end = le64toh(ent->ent_lba_end); - if (part_start < hdr->hdr_lba_start || part_start > part_end || - part_end > hdr->hdr_lba_end) { - printf("GEOM: %s: GPT partition %d is invalid -- " - "ignored.\n", gp->name, idx + 1); - continue; - } - - part = g_malloc(sizeof(struct g_gpt_part), M_WAITOK | M_ZERO); - part->index = idx; - part->offset = part_start * pp->sectorsize; - if (last == NULL) - LIST_INSERT_HEAD(&softc->parts, part, parts); - else - LIST_INSERT_AFTER(last, part, parts); - part->ent.ent_type = uuid; - le_uuid_dec(&ent->ent_uuid, &part->ent.ent_uuid); - part->ent.ent_lba_start = part_start; - 
part->ent.ent_lba_end = part_end; - part->ent.ent_attr = le64toh(ent->ent_attr); - for (ch = 0; ch < sizeof(ent->ent_name)/2; ch++) - part->ent.ent_name[ch] = le16toh(ent->ent_name[ch]); - - g_topology_lock(); - part->provider = g_new_providerf(gp, "%s%c%d", gp->name, - !memcmp(&uuid, &g_gpt_freebsd, sizeof(struct uuid)) - ? 's' : 'p', idx + 1); - part->provider->index = idx; - part->provider->private = part; /* Close the circle. */ - part->provider->mediasize = (part_end - part_start + 1) * - pp->sectorsize; - part->provider->sectorsize = pp->sectorsize; - part->provider->flags = pp->flags & G_PF_CANDELETE; - if (pp->stripesize > 0) { - part->provider->stripesize = pp->stripesize; - part->provider->stripeoffset = - (pp->stripeoffset + part->offset) % pp->stripesize; - } - g_error_provider(part->provider, 0); - g_topology_unlock(); - - if (bootverbose) { - printf("GEOM: %s: partition ", part->provider->name); - printf_uuid(&part->ent.ent_uuid); - printf(".\n"); - } - } -} - -static int -g_gpt_matched_hdrs(struct gpt_hdr *pri, struct gpt_hdr *sec) -{ - - if (memcmp(&pri->hdr_uuid, &sec->hdr_uuid, sizeof(struct uuid)) != 0) - return (0); - return ((pri->hdr_revision == sec->hdr_revision && - pri->hdr_size == sec->hdr_size && - pri->hdr_lba_start == sec->hdr_lba_start && - pri->hdr_lba_end == sec->hdr_lba_end && - pri->hdr_entries == sec->hdr_entries && - pri->hdr_entsz == sec->hdr_entsz && - pri->hdr_crc_table == sec->hdr_crc_table) ? 1 : 0); -} - -static int -g_gpt_tbl_ok(struct gpt_hdr *hdr, char *tbl) -{ - size_t sz; - uint32_t crc; - - crc = hdr->hdr_crc_table; - sz = hdr->hdr_entries * hdr->hdr_entsz; - return ((crc32(tbl, sz) == crc) ? 1 : 0); -} - -static void -g_gpt_to_utf8(struct sbuf *sb, uint16_t *str, size_t len) -{ - u_int bo; - uint32_t ch; - uint16_t c; - - bo = BYTE_ORDER; - while (len > 0 && *str != 0) { - ch = (bo == BIG_ENDIAN) ? 
be16toh(*str) : le16toh(*str); - str++, len--; - if ((ch & 0xf800) == 0xd800) { - if (len > 0) { - c = (bo == BIG_ENDIAN) ? be16toh(*str) - : le16toh(*str); - str++, len--; - } else - c = 0xfffd; - if ((ch & 0x400) == 0 && (c & 0xfc00) == 0xdc00) { - ch = ((ch & 0x3ff) << 10) + (c & 0x3ff); - ch += 0x10000; - } else - ch = 0xfffd; - } else if (ch == 0xfffe) { /* BOM (U+FEFF) swapped. */ - bo = (bo == BIG_ENDIAN) ? LITTLE_ENDIAN : BIG_ENDIAN; - continue; - } else if (ch == 0xfeff) /* BOM (U+FEFF) unswapped. */ - continue; - - if (ch < 0x80) - sbuf_printf(sb, "%c", ch); - else if (ch < 0x800) - sbuf_printf(sb, "%c%c", 0xc0 | (ch >> 6), - 0x80 | (ch & 0x3f)); - else if (ch < 0x10000) - sbuf_printf(sb, "%c%c%c", 0xe0 | (ch >> 12), - 0x80 | ((ch >> 6) & 0x3f), 0x80 | (ch & 0x3f)); - else if (ch < 0x200000) - sbuf_printf(sb, "%c%c%c%c", 0xf0 | (ch >> 18), - 0x80 | ((ch >> 12) & 0x3f), - 0x80 | ((ch >> 6) & 0x3f), 0x80 | (ch & 0x3f)); - } -} - -static void -g_gpt_wither(struct g_geom *gp, int error) -{ - struct g_gpt_part *part; - struct g_gpt_softc *softc; - - softc = gp->softc; - if (softc != NULL) { - part = LIST_FIRST(&softc->parts); - while (part != NULL) { - LIST_REMOVE(part, parts); - g_free(part); - part = LIST_FIRST(&softc->parts); - } - g_free(softc); - gp->softc = NULL; - } - g_wither_geom(gp, error); -} - -/* - * Class methods. - */ - -static void -g_gpt_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) -{ - struct uuid type; - struct g_geom *gp; - struct g_provider *pp; - struct g_gpt_softc *softc; - const char *flags; - char const *s; - uint64_t start, end; - long entry, entries; - enum g_gpt_ctl ctlreq; - int error; - - G_GPT_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); - g_topology_assert(); - - /* - * Improve error reporting by first checking if the verb is - * a valid one. It also allows us to make assumptions down- - * stream about the validity of the verb. 
- */ - ctlreq = G_GPT_CTL_NONE; - switch (*verb) { - case 'a': - if (!strcmp(verb, "add")) - ctlreq = G_GPT_CTL_ADD; - break; - case 'c': - if (!strcmp(verb, "create")) - ctlreq = G_GPT_CTL_CREATE; - break; - case 'd': - if (!strcmp(verb, "destroy")) - ctlreq = G_GPT_CTL_DESTROY; - break; - case 'm': - if (!strcmp(verb, "modify")) - ctlreq = G_GPT_CTL_MODIFY; - break; - case 'r': - if (!strcmp(verb, "recover")) - ctlreq = G_GPT_CTL_RECOVER; - else if (!strcmp(verb, "remove")) - ctlreq = G_GPT_CTL_REMOVE; - break; - } - if (ctlreq == G_GPT_CTL_NONE) { - gctl_error(req, "%d verb '%s'", EINVAL, verb); - return; - } - - /* - * All verbs take an optional flags parameter. The flags parameter - * is a string with each letter an independent flag. Each verb has - * it's own set of valid flags and the meaning of the flags is - * specific to the verb. Typically the presence of a letter (=flag) - * in the string means true and the absence means false. - */ - s = gctl_get_asciiparam(req, "flags"); - flags = (s == NULL) ? "" : s; - - /* - * Only the create verb takes a provider parameter. Make this a - * special case so that more code sharing is possible for the - * common case. - */ - if (ctlreq == G_GPT_CTL_CREATE) { - /* - * Create a GPT on a pristine disk-like provider. - * Required parameters/attributes: - * provider - * Optional parameters/attributes: - * entries - */ - s = gctl_get_asciiparam(req, "provider"); - if (s == NULL) { - gctl_error(req, "%d provider", ENOATTR); - return; - } - pp = g_provider_by_name(s); - if (pp == NULL) { - gctl_error(req, "%d provider '%s'", EINVAL, s); - return; - } - /* Check that there isn't already a GPT on the provider. 
*/ - LIST_FOREACH(gp, &mp->geom, geom) { - if (!strcmp(s, gp->name)) { - gctl_error(req, "%d geom '%s'", EEXIST, s); - return; - } - } - s = gctl_get_asciiparam(req, "entries"); - if (s != NULL) { - entries = strtol(s, (char **)(uintptr_t)&s, 0); - if (entries < 128 || *s != '\0') { - gctl_error(req, "%d entries %ld", EINVAL, - entries); - return; - } - } else - entries = 128; /* Documented mininum */ - gp = g_gpt_ctl_create(req, flags, mp, pp, entries); - return; - } - - /* - * All but the create verb, which is handled above, operate on an - * existing GPT geom. The geom parameter is non-optional, so get - * it here first. - */ - s = gctl_get_asciiparam(req, "geom"); - if (s == NULL) { - gctl_error(req, "%d geom", ENOATTR); - return; - } - /* Get the GPT geom with the given name. */ - LIST_FOREACH(gp, &mp->geom, geom) { - if (!strcmp(s, gp->name)) - break; - } - if (gp == NULL) { - gctl_error(req, "%d geom '%s'", EINVAL, s); - return; - } - softc = gp->softc; - - /* - * Now handle the verbs that can operate on a downgraded or - * partially corrupted GPT. In particular these are the verbs - * that don't deal with the table entries. We implement the - * policy that all table entry related requests require a - * valid GPT. - */ - if (ctlreq == G_GPT_CTL_DESTROY) { - /* - * Destroy a GPT completely. - */ - g_gpt_ctl_destroy(req, flags, gp); - return; - } - if (ctlreq == G_GPT_CTL_RECOVER) { - /* - * Recover a downgraded GPT. - */ - g_gpt_ctl_recover(req, flags, gp); - return; - } - - /* - * Check that the GPT is complete and valid before we make changes - * to the table entries. - */ - if (softc->state[GPT_HDR_PRIMARY] != GPT_HDR_OK || - softc->state[GPT_HDR_SECONDARY] != GPT_HDR_OK) { - gctl_error(req, "%d geom '%s'", ENXIO, s); - return; - } - - /* - * The add verb is the only table entry related verb that doesn't - * require the entry parameter. All other verbs identify the table - * entry by the entry number. Handle the add here. 
- */ - if (ctlreq == G_GPT_CTL_ADD) { - /* - * Add a partition entry to a GPT. - * Required parameters/attributes: - * type - * start - * end - * Optional parameters/attributes: - * entry (read/write) - * label - */ - s = gctl_get_asciiparam(req, "type"); - if (s == NULL) { - gctl_error(req, "%d type", ENOATTR); - return; - } - error = parse_uuid(s, &type); - if (error != 0) { - gctl_error(req, "%d type '%s'", error, s); - return; - } - s = gctl_get_asciiparam(req, "start"); - if (s == NULL) { - gctl_error(req, "%d start", ENOATTR); - return; - } - start = strtoq(s, (char **)(uintptr_t)&s, 0); - if (start < softc->hdr[GPT_HDR_PRIMARY].hdr_lba_start || - start > softc->hdr[GPT_HDR_PRIMARY].hdr_lba_end || - *s != '\0') { - gctl_error(req, "%d start %jd", EINVAL, - (intmax_t)start); - return; - } - s = gctl_get_asciiparam(req, "end"); - if (s == NULL) { - gctl_error(req, "%d end", ENOATTR); - return; - } - end = strtoq(s, (char **)(uintptr_t)&s, 0); - if (end < start || - end > softc->hdr[GPT_HDR_PRIMARY].hdr_lba_end || - *s != '\0') { - gctl_error(req, "%d end %jd", EINVAL, - (intmax_t)end); - return; - } - entry = 0; - s = gctl_get_asciiparam(req, "entry"); - if (s != NULL && *s != '\0') { - entry = strtol(s, (char **)(uintptr_t)&s, 0); - if (*s != '\0' || entry <= 0 || - entry > softc->hdr[GPT_HDR_PRIMARY].hdr_entries) { - gctl_error(req, "%d entry %ld", EINVAL, entry); - return; - } - } - g_gpt_ctl_add(req, flags, gp, &type, start, end, entry); - return; - } - - /* - * Get the table entry number. Entry numbers run from 1 to the - * number of entries in the table. - */ - s = gctl_get_asciiparam(req, "entry"); - if (s == NULL) { - gctl_error(req, "%d entry", ENOATTR); - return; - } - entry = strtol(s, (char **)(uintptr_t)&s, 0); - if (*s != '\0' || entry <= 0 || - entry > softc->hdr[GPT_HDR_PRIMARY].hdr_entries) { - gctl_error(req, "%d entry %ld", EINVAL, entry); - return; - } - - if (ctlreq == G_GPT_CTL_MODIFY) { - /* - * Modify a partition entry. 
- */ - g_gpt_ctl_modify(req, flags, gp, entry); - return; - } - if (ctlreq == G_GPT_CTL_REMOVE) { - /* - * Remove a partition entry. - */ - g_gpt_ctl_remove(req, flags, gp, entry); - return; - } -} - -static int -g_gpt_destroy_geom(struct gctl_req *req, struct g_class *mp, - struct g_geom *gp) -{ - - G_GPT_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); - g_topology_assert(); - - g_gpt_wither(gp, EINVAL); - return (0); -} - -static struct g_geom * -g_gpt_taste(struct g_class *mp, struct g_provider *pp, int insist __unused) -{ - struct g_consumer *cp; - struct g_geom *gp; - struct g_gpt_softc *softc; - struct gpt_hdr *hdr; - void *buf; - off_t ofs; - size_t nbytes; - int error; - - G_GPT_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); - g_topology_assert(); - - /* - * We don't nest. That is, we disallow nesting a GPT inside a GPT - * partition. We check only for direct nesting. Indirect nesting is - * not easy to determine. If you want, you can therefore nest GPT - * partitions by putting a dummy GEOM in between them. But I didn't - * say that... - */ - if (pp->geom->class == &g_gpt_class) - return (NULL); - - /* - * Create a GEOM with consumer and hook it up to the provider. - * With that we become part of the topology. Optain read, write - * and exclusive access to the provider. - */ - gp = g_new_geomf(mp, "%s", pp->name); - softc = g_malloc(sizeof(struct g_gpt_softc), M_WAITOK | M_ZERO); - gp->softc = softc; - LIST_INIT(&softc->parts); - cp = g_new_consumer(gp); - error = g_attach(cp, pp); - if (error == 0) - error = g_access(cp, 1, 0, 0); - if (error != 0) { - g_gpt_wither(gp, error); - return (NULL); - } - - g_topology_unlock(); - - /* - * Now that we have access permissions, we can sanity-check the - * provider. Since the first sector on the provider must be a PMBR - * and a PMBR is 512 bytes large, the sector size must be at least - * 512 bytes. 
We also require that the sector size is a multiple - * of the GPT entry size (which is 128 bytes). Lastly, since the - * theoretical minimum number of sectors needed by GPT is 6, any - * medium that has less than 6 sectors is never going to be able - * to hold a GPT. The number 6 comes from: - * 1 sector for the PMBR - * 2 sectors for the GPT headers (each 1 sector) - * 2 sectors for the GPT tables (each 1 sector) - * 1 sector for an actual partition - * It's better to catch this pathological case early than behaving - * pathologically later on by panicing... - */ - if (pp->sectorsize < 512 || - pp->sectorsize % sizeof(struct gpt_ent) != 0 || - pp->mediasize < 6 * pp->sectorsize) - goto fail; - - /* - * Read both the primary and secondary GPT headers. We have all - * the information at our fingertips that way to determine if - * there's a GPT, including whether recovery is appropriate. - */ - buf = g_read_data(cp, pp->sectorsize, pp->sectorsize, &error); - if (buf == NULL) - goto fail; - g_gpt_load_hdr(softc, pp, GPT_HDR_PRIMARY, buf); - g_free(buf); - - buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, - &error); - if (buf == NULL) - goto fail; - g_gpt_load_hdr(softc, pp, GPT_HDR_SECONDARY, buf); - g_free(buf); - - /* Bail out if there are no GPT headers at all. */ - if (softc->state[GPT_HDR_PRIMARY] == GPT_HDR_MISSING && - softc->state[GPT_HDR_SECONDARY] == GPT_HDR_MISSING) { - error = ENXIO; /* Device not configured for GPT. */ - goto fail; - } - - /* - * We have at least one GPT header (though that one may be corrupt - * or invalid). This disk supposedly has GPT in some shape or form. - * First check that there's a protective MBR. Complain if there - * is none and fail. - */ - if (!g_gpt_has_pmbr(cp, &error)) { - if (error != 0) - goto fail; - printf("GEOM: %s: GPT detected, but no protective MBR.\n", - pp->name); - error = ENXIO; - goto fail; - } - - /* - * Now, catch the non-recoverable case where there's no good GPT - * header at all. 
That is, unrecoverable by us. The user may able - * to fix it up with some magic. - */ - if (softc->state[GPT_HDR_PRIMARY] != GPT_HDR_OK && - softc->state[GPT_HDR_SECONDARY] != GPT_HDR_OK) { - printf("GEOM: %s: corrupt or invalid GPT detected.\n", - pp->name); - printf("GEOM: %s: GPT rejected -- may not be recoverable.\n", - pp->name); - error = EINVAL; /* No valid GPT header exists. */ - goto fail; - } - - /* - * Ok, at least one header is good. We can use the GPT. If there's - * a corrupt or invalid header, we'd like to user to know about it. - * Also catch the case where both headers appear to be good but are - * not mirroring each other. We only check superficially for that. - */ - if (softc->state[GPT_HDR_PRIMARY] != GPT_HDR_OK) { - printf("GEOM: %s: the primary GPT header is corrupt or " - "invalid.\n", pp->name); - printf("GEOM: %s: using the secondary instead -- recovery " - "strongly advised.\n", pp->name); - } else if (softc->state[GPT_HDR_SECONDARY] != GPT_HDR_OK) { - printf("GEOM: %s: the secondary GPT header is corrupt or " - "invalid.\n", pp->name); - printf("GEOM: %s: using the primary only -- recovery " - "suggested.\n", pp->name); - } else if (!g_gpt_matched_hdrs(softc->hdr + GPT_HDR_PRIMARY, - softc->hdr + GPT_HDR_SECONDARY)) { - printf("GEOM: %s: the primary and secondary GPT header do " - "not agree.\n", pp->name); - printf("GEOM: %s: GPT rejected -- recovery required.\n", - pp->name); - error = EINVAL; /* No consistent GPT exists. */ - goto fail; - } - - /* Always prefer the primary header. */ - hdr = (softc->state[GPT_HDR_PRIMARY] == GPT_HDR_OK) - ? softc->hdr + GPT_HDR_PRIMARY : softc->hdr + GPT_HDR_SECONDARY; - - /* - * Now that we've got a GPT header, we have to deal with the table - * itself. Again there's a primary table and a secondary table and - * either or both may be corrupt or invalid. Redundancy is nice, - * but it's a combinatorial pain in the butt. 
- */ - - nbytes = ((hdr->hdr_entries * hdr->hdr_entsz + pp->sectorsize - 1) / - pp->sectorsize) * pp->sectorsize; - - ofs = hdr->hdr_lba_table * pp->sectorsize; - buf = g_read_data(cp, ofs, nbytes, &error); - if (buf == NULL) - goto fail; - - /* - * If the table is corrupt, check if we can use the other one. - * Complain and bail if not. - */ - if (!g_gpt_tbl_ok(hdr, buf)) { - g_free(buf); - if (hdr != softc->hdr + GPT_HDR_PRIMARY || - softc->state[GPT_HDR_SECONDARY] != GPT_HDR_OK) { - printf("GEOM: %s: the GPT table is corrupt -- " - "may not be recoverable.\n", pp->name); - goto fail; - } - softc->state[GPT_HDR_PRIMARY] = GPT_HDR_CORRUPT; - hdr = softc->hdr + GPT_HDR_SECONDARY; - ofs = hdr->hdr_lba_table * pp->sectorsize; - buf = g_read_data(cp, ofs, nbytes, &error); - if (buf == NULL) - goto fail; - - if (!g_gpt_tbl_ok(hdr, buf)) { - g_free(buf); - printf("GEOM: %s: both primary and secondary GPT " - "tables are corrupt.\n", pp->name); - printf("GEOM: %s: GPT rejected -- may not be " - "recoverable.\n", pp->name); - goto fail; - } - printf("GEOM: %s: the primary GPT table is corrupt.\n", - pp->name); - printf("GEOM: %s: using the secondary table -- recovery " - "strongly advised.\n", pp->name); - } - - if (bootverbose) { - printf("GEOM: %s: GPT ", pp->name); - printf_uuid(&hdr->hdr_uuid); - printf(".\n"); - } - - g_gpt_load_tbl(gp, pp, hdr, buf); - g_free(buf); - g_topology_lock(); - g_access(cp, -1, 0, 0); - return (gp); - - fail: - g_topology_lock(); - g_access(cp, -1, 0, 0); - g_gpt_wither(gp, error); - return (NULL); -} - -/* - * Geom methods. - */ - -static int -g_gpt_access(struct g_provider *pp, int dr, int dw, int de) -{ - struct g_consumer *cp; - - G_GPT_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, - dw, de)); - - cp = LIST_FIRST(&pp->geom->consumer); - - /* We always gain write-exclusive access. 
*/ - return (g_access(cp, dr, dw, dw + de)); -} - -static void -g_gpt_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, - struct g_consumer *cp, struct g_provider *pp) -{ - static char *status[5] = { - "unknown", "missing", "corrupt", "invalid", "ok" - }; - struct g_gpt_part *part; - struct g_gpt_softc *softc; - struct gpt_hdr *hdr; - - KASSERT(sb != NULL && gp != NULL, (__func__)); - - if (indent == NULL) { - KASSERT(cp == NULL && pp != NULL, (__func__)); - part = pp->private; - if (part == NULL) - return; - sbuf_printf(sb, " i %u o %ju ty ", pp->index, - (uintmax_t)part->offset); - sbuf_printf_uuid(sb, &part->ent.ent_type); - } else if (cp != NULL) { /* Consumer configuration. */ - KASSERT(pp == NULL, (__func__)); - /* none */ - } else if (pp != NULL) { /* Provider configuration. */ - part = pp->private; - if (part == NULL) - return; - sbuf_printf(sb, "%s%u\n", indent, pp->index); - sbuf_printf(sb, "%s", indent); - sbuf_printf_uuid(sb, &part->ent.ent_type); - sbuf_printf(sb, "\n"); - sbuf_printf(sb, "%s", indent); - sbuf_printf_uuid(sb, &part->ent.ent_uuid); - sbuf_printf(sb, "\n"); - sbuf_printf(sb, "%s%ju\n", indent, - (uintmax_t)part->offset); - sbuf_printf(sb, "%s%ju\n", indent, - (uintmax_t)pp->mediasize); - sbuf_printf(sb, "%s%ju\n", indent, - (uintmax_t)part->ent.ent_attr); - sbuf_printf(sb, "%s\n"); - } else { /* Geom configuration. */ - softc = gp->softc; - hdr = (softc->state[GPT_HDR_PRIMARY] == GPT_HDR_OK) - ? softc->hdr + GPT_HDR_PRIMARY - : softc->hdr + GPT_HDR_SECONDARY; - sbuf_printf(sb, "%s", indent); - sbuf_printf_uuid(sb, &hdr->hdr_uuid); - sbuf_printf(sb, "\n"); - sbuf_printf(sb, "%s%s\n", indent, - status[softc->state[GPT_HDR_PRIMARY]]); - sbuf_printf(sb, "%s%s\n", indent, - status[softc->state[GPT_HDR_SECONDARY]]); - sbuf_printf(sb, "%s%s\n", indent, - (hdr == softc->hdr + GPT_HDR_PRIMARY) ? 
"primary" : - "secondary"); - sbuf_printf(sb, "%s%u\n", indent, - hdr->hdr_revision); - sbuf_printf(sb, "%s%u\n", indent, - hdr->hdr_size); - sbuf_printf(sb, "%s%u\n", indent, - hdr->hdr_crc_self); - sbuf_printf(sb, "%s%ju\n", indent, - (uintmax_t)hdr->hdr_lba_self); - sbuf_printf(sb, "%s%ju\n", indent, - (uintmax_t)hdr->hdr_lba_alt); - sbuf_printf(sb, "%s%ju\n", indent, - (uintmax_t)hdr->hdr_lba_start); - sbuf_printf(sb, "%s%ju\n", indent, - (uintmax_t)hdr->hdr_lba_end); - sbuf_printf(sb, "%s%ju\n", indent, - (uintmax_t)hdr->hdr_lba_table); - sbuf_printf(sb, "%s%u\n", indent, - hdr->hdr_crc_table); - sbuf_printf(sb, "%s%u\n", indent, - hdr->hdr_entries); - sbuf_printf(sb, "%s%u\n", indent, - hdr->hdr_entsz); - } -} - -static void -g_gpt_orphan(struct g_consumer *cp) -{ - struct g_provider *pp; - - pp = cp->provider; - KASSERT(pp != NULL, (__func__)); - G_GPT_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); - g_topology_assert(); - - KASSERT(pp->error != 0, (__func__)); - g_gpt_wither(cp->geom, pp->error); -} - -static void -g_gpt_spoiled(struct g_consumer *cp) -{ - - G_GPT_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); - g_topology_assert(); - - g_gpt_wither(cp->geom, ENXIO); -} - -static void -g_gpt_start(struct bio *bp) -{ - struct bio *bp2; - struct g_consumer *cp; - struct g_geom *gp; - struct g_gpt_part *part; - struct g_kerneldump *gkd; - struct g_provider *pp; - - pp = bp->bio_to; - gp = pp->geom; - cp = LIST_FIRST(&gp->consumer); - - G_GPT_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, - pp->name)); - - part = pp->private; - if (part == NULL) { - g_io_deliver(bp, ENXIO); - return; - } - - switch(bp->bio_cmd) { - case BIO_READ: - case BIO_WRITE: - case BIO_DELETE: - if (bp->bio_offset >= pp->mediasize) { - g_io_deliver(bp, EIO); - break; - } - bp2 = g_clone_bio(bp); - if (bp2 == NULL) { - g_io_deliver(bp, ENOMEM); - break; - } - if (bp2->bio_offset + bp2->bio_length > pp->mediasize) - bp2->bio_length = pp->mediasize - 
bp2->bio_offset; - bp2->bio_done = g_std_done; - bp2->bio_offset += part->offset; - g_io_request(bp2, cp); - break; - case BIO_GETATTR: - if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { - /* - * Refuse non-swap partitions to be used as kernel - * dumps. - */ - if (memcmp(&part->ent.ent_type, &g_gpt_freebsd_swap, - sizeof(struct uuid)) && memcmp(&part->ent.ent_type, - &g_gpt_linux_swap, sizeof(struct uuid))) { - g_io_deliver(bp, ENXIO); - break; - } - gkd = (struct g_kerneldump *)bp->bio_data; - if (gkd->offset >= pp->mediasize) { - g_io_deliver(bp, EIO); - break; - } - if (gkd->offset + gkd->length > pp->mediasize) - gkd->length = pp->mediasize - gkd->offset; - gkd->offset += part->offset; - /* FALLTHROUGH */ - } - /* FALLTHROUGH */ - case BIO_FLUSH: - bp2 = g_clone_bio(bp); - if (bp2 == NULL) { - g_io_deliver(bp, ENOMEM); - break; - } - bp2->bio_done = g_std_done; - g_io_request(bp2, cp); - break; - default: - g_io_deliver(bp, EOPNOTSUPP); - break; - } -} diff --git a/sys/geom/part/g_part.c b/sys/geom/part/g_part.c new file mode 100644 index 0000000..2293e21 --- /dev/null +++ b/sys/geom/part/g_part.c @@ -0,0 +1,1385 @@ +/*- + * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "g_part_if.h" + +static kobj_method_t g_part_null_methods[] = { + { 0, 0 } +}; + +static struct g_part_scheme g_part_null_scheme = { + NULL, + g_part_null_methods, + sizeof(struct g_part_table), +}; +G_PART_SCHEME_DECLARE(g_part_null_scheme); + +SET_DECLARE(g_part_scheme_set, struct g_part_scheme); + +struct g_part_alias_list { + const char *lexeme; + enum g_part_alias alias; +} g_part_alias_list[G_PART_ALIAS_COUNT] = { + { "@efi", G_PART_ALIAS_EFI }, + { "@freebsd", G_PART_ALIAS_FREEBSD }, + { "@freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, + { "@freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, + { "@freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, + { "@mbr", G_PART_ALIAS_MBR } +}; + +/* + * The GEOM partitioning class. + */ +static g_ctl_req_t g_part_ctlreq; +static g_ctl_destroy_geom_t g_part_destroy_geom; +static g_taste_t g_part_taste; + +static g_access_t g_part_access; +static g_dumpconf_t g_part_dumpconf; +static g_orphan_t g_part_orphan; +static g_spoiled_t g_part_spoiled; +static g_start_t g_part_start; + +static struct g_class g_part_class = { + .name = "PART", + .version = G_VERSION, + /* Class methods. */ + .ctlreq = g_part_ctlreq, + .destroy_geom = g_part_destroy_geom, + .taste = g_part_taste, + /* Geom methods. 
*/ + .access = g_part_access, + .dumpconf = g_part_dumpconf, + .orphan = g_part_orphan, + .spoiled = g_part_spoiled, + .start = g_part_start, +}; + +DECLARE_GEOM_CLASS(g_part_class, g_part); + +enum g_part_ctl { + G_PART_CTL_NONE, + G_PART_CTL_ADD, + G_PART_CTL_COMMIT, + G_PART_CTL_CREATE, + G_PART_CTL_DELETE, + G_PART_CTL_DESTROY, + G_PART_CTL_MODIFY, + G_PART_CTL_MOVE, + G_PART_CTL_QUERY, + G_PART_CTL_RECOVER, + G_PART_CTL_RESIZE, + G_PART_CTL_UNDO +}; + +/* + * Support functions. + */ + +static void g_part_wither(struct g_geom *, int); + +const char * +g_part_alias_name(enum g_part_alias alias) +{ + int i; + + for (i = 0; i < G_PART_ALIAS_COUNT; i++) { + if (g_part_alias_list[i].alias != alias) + continue; + return (g_part_alias_list[i].lexeme); + } + + return (NULL); +} + +struct g_part_entry * +g_part_new_entry(struct g_part_table *table, int index, quad_t start, + quad_t end) +{ + struct g_part_entry *entry, *last; + + last = NULL; + LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { + if (entry->gpe_index == index) + break; + if (entry->gpe_index > index) { + entry = NULL; + break; + } + last = entry; + } + if (entry == NULL) { + entry = g_malloc(table->gpt_scheme->gps_entrysz, + M_WAITOK | M_ZERO); + entry->gpe_index = index; + if (last == NULL) + LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); + else + LIST_INSERT_AFTER(last, entry, gpe_entry); + } + entry->gpe_start = start; + entry->gpe_end = end; + return (entry); +} + +static void +g_part_new_provider(struct g_geom *gp, struct g_part_table *table, + struct g_part_entry *entry) +{ + char buf[32]; + struct g_consumer *cp; + struct g_provider *pp; + + cp = LIST_FIRST(&gp->consumer); + pp = cp->provider; + + entry->gpe_offset = entry->gpe_start * pp->sectorsize; + + if (entry->gpe_pp == NULL) { + entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name, + G_PART_NAME(table, entry, buf, sizeof(buf))); + entry->gpe_pp->private = entry; /* Close the circle. 
*/ + } + entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ + entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * + pp->sectorsize; + entry->gpe_pp->sectorsize = pp->sectorsize; + entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE; + if (pp->stripesize > 0) { + entry->gpe_pp->stripesize = pp->stripesize; + entry->gpe_pp->stripeoffset = (pp->stripeoffset + + entry->gpe_offset) % pp->stripesize; + } + g_error_provider(entry->gpe_pp, 0); +} + +static int +g_part_parm_geom(const char *p, struct g_geom **v) +{ + struct g_geom *gp; + + LIST_FOREACH(gp, &g_part_class.geom, geom) { + if (!strcmp(p, gp->name)) + break; + } + if (gp == NULL) + return (EINVAL); + *v = gp; + return (0); +} + +static int +g_part_parm_provider(const char *p, struct g_provider **v) +{ + struct g_provider *pp; + + pp = g_provider_by_name(p); + if (pp == NULL) + return (EINVAL); + *v = pp; + return (0); +} + +static int +g_part_parm_quad(const char *p, quad_t *v) +{ + char *x; + quad_t q; + + q = strtoq(p, &x, 0); + if (*x != '\0' || q < 0) + return (EINVAL); + *v = q; + return (0); +} + +static int +g_part_parm_scheme(const char *p, struct g_part_scheme **v) +{ + struct g_part_scheme **iter, *s; + + s = NULL; + SET_FOREACH(iter, g_part_scheme_set) { + if ((*iter)->name == NULL) + continue; + if (!strcmp((*iter)->name, p)) { + s = *iter; + break; + } + } + if (s == NULL) + return (EINVAL); + *v = s; + return (0); +} + +static int +g_part_parm_str(const char *p, const char **v) +{ + + if (p[0] == '\0') + return (EINVAL); + *v = p; + return (0); +} + +static int +g_part_parm_uint(const char *p, u_int *v) +{ + char *x; + long l; + + l = strtol(p, &x, 0); + if (*x != '\0' || l < 0 || l > INT_MAX) + return (EINVAL); + *v = (unsigned int)l; + return (0); +} + +static int +g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) +{ + struct g_part_scheme **iter, *scheme; + struct g_part_table *table; + int pri, probe; + + table = gp->softc; + scheme = (table != 
NULL) ? table->gpt_scheme : &g_part_null_scheme; + pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) : + INT_MIN; + if (pri == 0) + goto done; + if (pri > 0) { /* error */ + scheme = &g_part_null_scheme; + pri = INT_MIN; + } + + SET_FOREACH(iter, g_part_scheme_set) { + if ((*iter) == &g_part_null_scheme) + continue; + table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM, + M_WAITOK); + table->gpt_gp = gp; + table->gpt_scheme = *iter; + table->gpt_depth = depth; + probe = G_PART_PROBE(table, cp); + if (probe <= 0 && probe > pri) { + pri = probe; + scheme = *iter; + if (gp->softc != NULL) + kobj_delete((kobj_t)gp->softc, M_GEOM); + gp->softc = table; + if (pri == 0) + goto done; + } else + kobj_delete((kobj_t)table, M_GEOM); + } + +done: + return ((scheme == &g_part_null_scheme) ? ENXIO : 0); +} + +/* + * Control request functions. + */ + +static int +g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) +{ + char buf[16]; + struct g_geom *gp; + struct g_provider *pp; + struct g_part_entry *delent, *last, *entry; + struct g_part_table *table; + quad_t end; + unsigned int index; + int error; + + gp = gpp->gpp_geom; + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); + g_topology_assert(); + + pp = LIST_FIRST(&gp->consumer)->provider; + table = gp->softc; + end = gpp->gpp_start + gpp->gpp_size - 1; + + if (gpp->gpp_start < table->gpt_first || + gpp->gpp_start > table->gpt_last) { + gctl_error(req, "%d start '%jd'", EINVAL, + (intmax_t)gpp->gpp_start); + return (EINVAL); + } + if (end < gpp->gpp_start || end > table->gpt_last) { + gctl_error(req, "%d size '%jd'", EINVAL, + (intmax_t)gpp->gpp_size); + return (EINVAL); + } + if (gpp->gpp_index > table->gpt_entries) { + gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); + return (EINVAL); + } + + delent = last = NULL; + index = (gpp->gpp_index > 0) ? 
gpp->gpp_index : 1; + LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { + if (entry->gpe_deleted) { + if (entry->gpe_index == index) + delent = entry; + continue; + } + if (entry->gpe_index == index) { + index = entry->gpe_index + 1; + last = entry; + } + if (gpp->gpp_start >= entry->gpe_start && + gpp->gpp_start <= entry->gpe_end) { + gctl_error(req, "%d start '%jd'", ENOSPC, + (intmax_t)gpp->gpp_start); + return (ENOSPC); + } + if (end >= entry->gpe_start && end <= entry->gpe_end) { + gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); + return (ENOSPC); + } + if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { + gctl_error(req, "%d size '%jd'", ENOSPC, + (intmax_t)gpp->gpp_size); + return (ENOSPC); + } + } + if (gpp->gpp_index > 0 && index != gpp->gpp_index) { + gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); + return (EEXIST); + } + snprintf(buf, sizeof(buf), "%d", index); + gctl_set_param(req, "index", buf, strlen(buf) + 1); + + entry = (delent == NULL) ? 
g_malloc(table->gpt_scheme->gps_entrysz, + M_WAITOK | M_ZERO) : delent; + entry->gpe_index = index; + entry->gpe_start = gpp->gpp_start; + entry->gpe_end = end; + error = G_PART_ADD(table, entry, gpp); + if (error) { + gctl_error(req, "%d", error); + if (delent == NULL) + g_free(entry); + return (error); + } + if (delent == NULL) { + if (last == NULL) + LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); + else + LIST_INSERT_AFTER(last, entry, gpe_entry); + entry->gpe_created = 1; + } else { + entry->gpe_deleted = 0; + entry->gpe_modified = 1; + } + g_part_new_provider(gp, table, entry); + return (0); +} + +static int +g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) +{ + struct g_consumer *cp; + struct g_geom *gp; + struct g_provider *pp; + struct g_part_entry *entry, *tmp; + struct g_part_table *table; + char *buf; + int error, i; + + gp = gpp->gpp_geom; + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); + g_topology_assert(); + + table = gp->softc; + if (!table->gpt_opened) { + gctl_error(req, "%d", EPERM); + return (EPERM); + } + + cp = LIST_FIRST(&gp->consumer); + if ((table->gpt_smhead | table->gpt_smtail) != 0) { + pp = cp->provider; + buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); + while (table->gpt_smhead != 0) { + i = ffs(table->gpt_smhead) - 1; + error = g_write_data(cp, i * pp->sectorsize, buf, + pp->sectorsize); + if (error) { + g_free(buf); + goto fail; + } + table->gpt_smhead &= ~(1 << i); + } + while (table->gpt_smtail != 0) { + i = ffs(table->gpt_smtail) - 1; + error = g_write_data(cp, pp->mediasize - (i + 1) * + pp->sectorsize, buf, pp->sectorsize); + if (error) { + g_free(buf); + goto fail; + } + table->gpt_smtail &= ~(1 << i); + } + g_free(buf); + } + + if (table->gpt_scheme == &g_part_null_scheme) { + g_access(cp, -1, -1, -1); + g_part_wither(gp, ENXIO); + return (0); + } + + error = G_PART_WRITE(table, cp); + if (error) + goto fail; + + LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { + if 
(!entry->gpe_deleted) { + entry->gpe_created = 0; + entry->gpe_modified = 0; + continue; + } + LIST_REMOVE(entry, gpe_entry); + g_free(entry); + } + table->gpt_created = 0; + table->gpt_opened = 0; + g_access(cp, -1, -1, -1); + return (0); + +fail: + gctl_error(req, "%d", error); + return (error); +} + +static int +g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) +{ + struct g_consumer *cp; + struct g_geom *gp; + struct g_provider *pp; + struct g_part_scheme *scheme; + struct g_part_table *null, *table; + int attr, error; + + pp = gpp->gpp_provider; + scheme = gpp->gpp_scheme; + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); + g_topology_assert(); + + /* Check that there isn't already a g_part geom on the provider. */ + error = g_part_parm_geom(pp->name, &gp); + if (!error) { + null = gp->softc; + if (null->gpt_scheme != &g_part_null_scheme) { + gctl_error(req, "%d geom '%s'", EEXIST, pp->name); + return (EEXIST); + } + } else + null = NULL; + + if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && + (gpp->gpp_entries < scheme->gps_minent || + gpp->gpp_entries > scheme->gps_maxent)) { + gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); + return (EINVAL); + } + + if (null == NULL) + gp = g_new_geomf(&g_part_class, "%s", pp->name); + gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, + M_WAITOK); + table = gp->softc; + table->gpt_gp = gp; + table->gpt_scheme = gpp->gpp_scheme; + table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 
+ gpp->gpp_entries : scheme->gps_minent; + LIST_INIT(&table->gpt_entry); + if (null == NULL) { + cp = g_new_consumer(gp); + error = g_attach(cp, pp); + if (error == 0) + error = g_access(cp, 1, 1, 1); + if (error != 0) { + g_part_wither(gp, error); + gctl_error(req, "%d geom '%s'", error, pp->name); + return (error); + } + table->gpt_opened = 1; + } else { + cp = LIST_FIRST(&gp->consumer); + table->gpt_opened = null->gpt_opened; + table->gpt_smhead = null->gpt_smhead; + table->gpt_smtail = null->gpt_smtail; + } + + g_topology_unlock(); + + /* Make sure we can nest and if so, determine our depth. */ + error = g_getattr("PART::isleaf", cp, &attr); + if (!error && attr) { + error = ENODEV; + goto fail; + } + error = g_getattr("PART::depth", cp, &attr); + table->gpt_depth = (!error) ? attr + 1 : 0; + + error = G_PART_CREATE(table, gpp); + if (error) + goto fail; + + g_topology_lock(); + + table->gpt_created = 1; + if (null != NULL) + kobj_delete((kobj_t)null, M_GEOM); + return (0); + +fail: + g_topology_lock(); + if (null == NULL) { + g_access(cp, -1, -1, -1); + g_part_wither(gp, error); + } else { + kobj_delete((kobj_t)gp->softc, M_GEOM); + gp->softc = null; + } + gctl_error(req, "%d provider", error); + return (error); +} + +static int +g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) +{ + struct g_geom *gp; + struct g_provider *pp; + struct g_part_entry *entry; + struct g_part_table *table; + + gp = gpp->gpp_geom; + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); + g_topology_assert(); + + table = gp->softc; + + LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { + if (entry->gpe_deleted) + continue; + if (entry->gpe_index == gpp->gpp_index) + break; + } + if (entry == NULL) { + gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); + return (ENOENT); + } + + pp = entry->gpe_pp; + if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { + gctl_error(req, "%d", EBUSY); + return (EBUSY); + } + + pp->private = NULL; + entry->gpe_pp = NULL; + if 
(entry->gpe_created) { + LIST_REMOVE(entry, gpe_entry); + g_free(entry); + } else { + entry->gpe_modified = 0; + entry->gpe_deleted = 1; + } + g_wither_provider(pp, ENXIO); + return (0); +} + +static int +g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) +{ + struct g_geom *gp; + struct g_part_entry *entry; + struct g_part_table *null, *table; + int error; + + gp = gpp->gpp_geom; + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); + g_topology_assert(); + + table = gp->softc; + LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { + if (entry->gpe_deleted) + continue; + gctl_error(req, "%d", EBUSY); + return (EBUSY); + } + + error = G_PART_DESTROY(table, gpp); + if (error) { + gctl_error(req, "%d", error); + return (error); + } + + gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, + M_WAITOK); + null = gp->softc; + null->gpt_gp = gp; + null->gpt_scheme = &g_part_null_scheme; + LIST_INIT(&null->gpt_entry); + null->gpt_depth = table->gpt_depth; + null->gpt_opened = table->gpt_opened; + null->gpt_smhead = table->gpt_smhead; + null->gpt_smtail = table->gpt_smtail; + + while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { + LIST_REMOVE(entry, gpe_entry); + g_free(entry); + } + kobj_delete((kobj_t)table, M_GEOM); + + return (0); +} + +static int +g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) +{ + struct g_geom *gp; + struct g_part_entry *entry; + struct g_part_table *table; + int error; + + gp = gpp->gpp_geom; + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); + g_topology_assert(); + + table = gp->softc; + + LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { + if (entry->gpe_deleted) + continue; + if (entry->gpe_index == gpp->gpp_index) + break; + } + if (entry == NULL) { + gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); + return (ENOENT); + } + + error = G_PART_MODIFY(table, entry, gpp); + if (error) { + gctl_error(req, "%d", error); + return (error); + } + + if 
(!entry->gpe_created) + entry->gpe_modified = 1; + return (0); +} + +static int +g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) +{ + gctl_error(req, "%d verb 'move'", ENOSYS); + return (ENOSYS); +} + +static int +g_part_ctl_query(struct gctl_req *req, struct g_part_parms *gpp) +{ + gctl_error(req, "%d verb 'query'", ENOSYS); + return (ENOSYS); +} + +static int +g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) +{ + gctl_error(req, "%d verb 'recover'", ENOSYS); + return (ENOSYS); +} + +static int +g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) +{ + gctl_error(req, "%d verb 'resize'", ENOSYS); + return (ENOSYS); +} + +static int +g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) +{ + struct g_consumer *cp; + struct g_provider *pp; + struct g_geom *gp; + struct g_part_entry *entry, *tmp; + struct g_part_table *table; + int error, reprobe; + + gp = gpp->gpp_geom; + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); + g_topology_assert(); + + table = gp->softc; + if (!table->gpt_opened) { + gctl_error(req, "%d", EPERM); + return (EPERM); + } + + cp = LIST_FIRST(&gp->consumer); + LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { + entry->gpe_modified = 0; + if (entry->gpe_created) { + pp = entry->gpe_pp; + pp->private = NULL; + entry->gpe_pp = NULL; + g_wither_provider(pp, ENXIO); + entry->gpe_deleted = 1; + } + if (entry->gpe_deleted) { + LIST_REMOVE(entry, gpe_entry); + g_free(entry); + } + } + + g_topology_unlock(); + + reprobe = (table->gpt_scheme == &g_part_null_scheme || + table->gpt_created) ? 
1 : 0; + + if (reprobe) { + if (!LIST_EMPTY(&table->gpt_entry)) { + error = EBUSY; + goto fail; + } + error = g_part_probe(gp, cp, table->gpt_depth); + if (error) + goto fail; + table = gp->softc; + } + + error = G_PART_READ(table, cp); + if (error) + goto fail; + + g_topology_lock(); + + LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) + g_part_new_provider(gp, table, entry); + + table->gpt_opened = 0; + g_access(cp, -1, -1, -1); + return (0); + +fail: + g_topology_lock(); + gctl_error(req, "%d", error); + return (error); +} + +static void +g_part_wither(struct g_geom *gp, int error) +{ + struct g_part_entry *entry; + struct g_part_table *table; + + table = gp->softc; + if (table != NULL) { + while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { + LIST_REMOVE(entry, gpe_entry); + g_free(entry); + } + if (gp->softc != NULL) { + kobj_delete((kobj_t)gp->softc, M_GEOM); + gp->softc = NULL; + } + } + g_wither_geom(gp, error); +} + +/* + * Class methods. + */ + +static void +g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) +{ + struct g_part_parms gpp; + struct g_part_table *table; + struct gctl_req_arg *ap; + const char *p; + enum g_part_ctl ctlreq; + unsigned int i, mparms, oparms, parm; + int error, modifies; + + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); + g_topology_assert(); + + ctlreq = G_PART_CTL_NONE; + modifies = 0; + mparms = oparms = 0; + switch (*verb) { + case 'a': + if (!strcmp(verb, "add")) { + ctlreq = G_PART_CTL_ADD; + modifies = 1; + mparms = G_PART_PARM_GEOM | G_PART_PARM_SIZE | + G_PART_PARM_START | G_PART_PARM_TYPE; + oparms = G_PART_PARM_FLAGS | G_PART_PARM_INDEX | + G_PART_PARM_LABEL; + } + break; + case 'c': + if (!strcmp(verb, "commit")) { + ctlreq = G_PART_CTL_COMMIT; + mparms = G_PART_PARM_GEOM; + oparms = G_PART_PARM_FLAGS; + } else if (!strcmp(verb, "create")) { + ctlreq = G_PART_CTL_CREATE; + modifies = 1; + mparms = G_PART_PARM_PROVIDER | + G_PART_PARM_SCHEME; + oparms = 
G_PART_PARM_ENTRIES | G_PART_PARM_FLAGS; + } + break; + case 'd': + if (!strcmp(verb, "delete")) { + ctlreq = G_PART_CTL_DELETE; + modifies = 1; + mparms = G_PART_PARM_GEOM | G_PART_PARM_INDEX; + oparms = G_PART_PARM_FLAGS; + } else if (!strcmp(verb, "destroy")) { + ctlreq = G_PART_CTL_DESTROY; + modifies = 1; + mparms = G_PART_PARM_GEOM; + oparms = G_PART_PARM_FLAGS; + } + break; + case 'm': + if (!strcmp(verb, "modify")) { + ctlreq = G_PART_CTL_MODIFY; + modifies = 1; + mparms = G_PART_PARM_GEOM | G_PART_PARM_INDEX; + oparms = G_PART_PARM_FLAGS | G_PART_PARM_LABEL | + G_PART_PARM_TYPE; + } else if (!strcmp(verb, "move")) { + ctlreq = G_PART_CTL_MOVE; + modifies = 1; + mparms = G_PART_PARM_GEOM | G_PART_PARM_INDEX; + oparms = G_PART_PARM_FLAGS; + } + break; + case 'q': + if (!strcmp(verb, "query")) { + ctlreq = G_PART_CTL_QUERY; + mparms = G_PART_PARM_REQUEST | G_PART_PARM_RESPONSE; + oparms = G_PART_PARM_FLAGS | G_PART_PARM_GEOM; + } + break; + case 'r': + if (!strcmp(verb, "recover")) { + ctlreq = G_PART_CTL_RECOVER; + modifies = 1; + mparms = G_PART_PARM_GEOM; + oparms = G_PART_PARM_FLAGS; + } else if (!strcmp(verb, "resize")) { + ctlreq = G_PART_CTL_RESIZE; + modifies = 1; + mparms = G_PART_PARM_GEOM | G_PART_PARM_INDEX; + oparms = G_PART_PARM_FLAGS; + } + break; + case 'u': + if (!strcmp(verb, "undo")) { + ctlreq = G_PART_CTL_UNDO; + mparms = G_PART_PARM_GEOM; + oparms = G_PART_PARM_FLAGS; + } + break; + } + if (ctlreq == G_PART_CTL_NONE) { + gctl_error(req, "%d verb '%s'", EINVAL, verb); + return; + } + + bzero(&gpp, sizeof(gpp)); + for (i = 0; i < req->narg; i++) { + ap = &req->arg[i]; + parm = 0; + switch (ap->name[0]) { + case 'c': + if (!strcmp(ap->name, "class")) + continue; + break; + case 'e': + if (!strcmp(ap->name, "entries")) + parm = G_PART_PARM_ENTRIES; + break; + case 'f': + if (!strcmp(ap->name, "flags")) + parm = G_PART_PARM_FLAGS; + break; + case 'g': + if (!strcmp(ap->name, "geom")) + parm = G_PART_PARM_GEOM; + break; + case 'i': + if 
(!strcmp(ap->name, "index")) + parm = G_PART_PARM_INDEX; + break; + case 'l': + if (!strcmp(ap->name, "label")) + parm = G_PART_PARM_LABEL; + break; + case 'p': + if (!strcmp(ap->name, "provider")) + parm = G_PART_PARM_PROVIDER; + break; + case 'r': + if (!strcmp(ap->name, "request")) + parm = G_PART_PARM_REQUEST; + else if (!strcmp(ap->name, "response")) + parm = G_PART_PARM_RESPONSE; + break; + case 's': + if (!strcmp(ap->name, "scheme")) + parm = G_PART_PARM_SCHEME; + else if (!strcmp(ap->name, "size")) + parm = G_PART_PARM_SIZE; + else if (!strcmp(ap->name, "start")) + parm = G_PART_PARM_START; + break; + case 't': + if (!strcmp(ap->name, "type")) + parm = G_PART_PARM_TYPE; + break; + case 'v': + if (!strcmp(ap->name, "verb")) + continue; + break; + } + if ((parm & (mparms | oparms)) == 0) { + gctl_error(req, "%d param '%s'", EINVAL, ap->name); + return; + } + p = gctl_get_asciiparam(req, ap->name); + if (p == NULL) { + gctl_error(req, "%d param '%s'", ENOATTR, ap->name); + return; + } + switch (parm) { + case G_PART_PARM_ENTRIES: + error = g_part_parm_uint(p, &gpp.gpp_entries); + break; + case G_PART_PARM_FLAGS: + error = g_part_parm_str(p, &gpp.gpp_flags); + break; + case G_PART_PARM_GEOM: + error = g_part_parm_geom(p, &gpp.gpp_geom); + break; + case G_PART_PARM_INDEX: + error = g_part_parm_uint(p, &gpp.gpp_index); + break; + case G_PART_PARM_LABEL: + error = g_part_parm_str(p, &gpp.gpp_label); + break; + case G_PART_PARM_PROVIDER: + error = g_part_parm_provider(p, &gpp.gpp_provider); + break; + case G_PART_PARM_REQUEST: + error = g_part_parm_str(p, &gpp.gpp_request); + break; + case G_PART_PARM_RESPONSE: + error = 0; /* Write-only parameter. 
*/ + break; + case G_PART_PARM_SCHEME: + error = g_part_parm_scheme(p, &gpp.gpp_scheme); + break; + case G_PART_PARM_SIZE: + error = g_part_parm_quad(p, &gpp.gpp_size); + break; + case G_PART_PARM_START: + error = g_part_parm_quad(p, &gpp.gpp_start); + break; + case G_PART_PARM_TYPE: + error = g_part_parm_str(p, &gpp.gpp_type); + break; + default: + error = EDOOFUS; + break; + } + if (error) { + gctl_error(req, "%d %s '%s'", error, ap->name, p); + return; + } + gpp.gpp_parms |= parm; + } + if ((gpp.gpp_parms & mparms) != mparms) { + parm = mparms - (gpp.gpp_parms & mparms); + gctl_error(req, "%d param '%x'", ENOATTR, parm); + return; + } + + /* Obtain permissions if possible/necessary. */ + if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { + table = gpp.gpp_geom->softc; + if (table != NULL && !table->gpt_opened) { + error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), + 1, 1, 1); + if (error) { + gctl_error(req, "%d geom '%s'", error, + gpp.gpp_geom->name); + return; + } + table->gpt_opened = 1; + } + } + + error = EDOOFUS; /* Prevent bogus uninit. warning. 
*/ + switch (ctlreq) { + case G_PART_CTL_NONE: + panic("%s", __func__); + case G_PART_CTL_ADD: + error = g_part_ctl_add(req, &gpp); + break; + case G_PART_CTL_COMMIT: + error = g_part_ctl_commit(req, &gpp); + break; + case G_PART_CTL_CREATE: + error = g_part_ctl_create(req, &gpp); + break; + case G_PART_CTL_DELETE: + error = g_part_ctl_delete(req, &gpp); + break; + case G_PART_CTL_DESTROY: + error = g_part_ctl_destroy(req, &gpp); + break; + case G_PART_CTL_MODIFY: + error = g_part_ctl_modify(req, &gpp); + break; + case G_PART_CTL_MOVE: + error = g_part_ctl_move(req, &gpp); + break; + case G_PART_CTL_QUERY: + error = g_part_ctl_query(req, &gpp); + break; + case G_PART_CTL_RECOVER: + error = g_part_ctl_recover(req, &gpp); + break; + case G_PART_CTL_RESIZE: + error = g_part_ctl_resize(req, &gpp); + break; + case G_PART_CTL_UNDO: + error = g_part_ctl_undo(req, &gpp); + break; + } +} + +static int +g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, + struct g_geom *gp) +{ + + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); + g_topology_assert(); + + g_part_wither(gp, EINVAL); + return (0); +} + +static struct g_geom * +g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) +{ + struct g_consumer *cp; + struct g_geom *gp; + struct g_part_entry *entry; + struct g_part_table *table; + int attr, depth; + int error; + + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); + g_topology_assert(); + + /* + * Create a GEOM with consumer and hook it up to the provider. + * With that we become part of the topology. Optain read access + * to the provider. + */ + gp = g_new_geomf(mp, "%s", pp->name); + cp = g_new_consumer(gp); + error = g_attach(cp, pp); + if (error == 0) + error = g_access(cp, 1, 0, 0); + if (error != 0) { + g_part_wither(gp, error); + return (NULL); + } + + g_topology_unlock(); + + /* Make sure we can nest and if so, determine our depth. 
*/ + error = g_getattr("PART::isleaf", cp, &attr); + if (!error && attr) { + error = ENODEV; + goto fail; + } + error = g_getattr("PART::depth", cp, &attr); + depth = (!error) ? attr + 1 : 0; + + error = g_part_probe(gp, cp, depth); + if (error) + goto fail; + + table = gp->softc; + error = G_PART_READ(table, cp); + if (error) + goto fail; + + g_topology_lock(); + LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) + g_part_new_provider(gp, table, entry); + + g_access(cp, -1, 0, 0); + return (gp); + + fail: + g_topology_lock(); + g_access(cp, -1, 0, 0); + g_part_wither(gp, error); + return (NULL); +} + +/* + * Geom methods. + */ + +static int +g_part_access(struct g_provider *pp, int dr, int dw, int de) +{ + struct g_consumer *cp; + + G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, + dw, de)); + + cp = LIST_FIRST(&pp->geom->consumer); + + /* We always gain write-exclusive access. */ + return (g_access(cp, dr, dw, dw + de)); +} + +static void +g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, + struct g_consumer *cp, struct g_provider *pp) +{ + char buf[64]; + struct g_part_entry *entry; + struct g_part_table *table; + + KASSERT(sb != NULL && gp != NULL, (__func__)); + table = gp->softc; + + if (indent == NULL) { + KASSERT(cp == NULL && pp != NULL, (__func__)); + entry = pp->private; + if (entry == NULL) + return; + sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, + (uintmax_t)entry->gpe_offset, + G_PART_TYPE(table, entry, buf, sizeof(buf))); + } else if (cp != NULL) { /* Consumer configuration. */ + KASSERT(pp == NULL, (__func__)); + /* none */ + } else if (pp != NULL) { /* Provider configuration. 
*/ + entry = pp->private; + if (entry == NULL) + return; + sbuf_printf(sb, "%s%u\n", indent, + entry->gpe_index); + sbuf_printf(sb, "%s%s\n", indent, + G_PART_TYPE(table, entry, buf, sizeof(buf))); + sbuf_printf(sb, "%s%ju\n", indent, + (uintmax_t)entry->gpe_offset); + sbuf_printf(sb, "%s%ju\n", indent, + (uintmax_t)pp->mediasize); + G_PART_DUMPCONF(table, entry, sb, indent); + } else { /* Geom configuration. */ + sbuf_printf(sb, "%s%u\n", indent, + table->gpt_entries); + sbuf_printf(sb, "%s%ju\n", indent, + (uintmax_t)table->gpt_first); + sbuf_printf(sb, "%s%ju\n", indent, + (uintmax_t)table->gpt_last); + G_PART_DUMPCONF(table, NULL, sb, indent); + } +} + +static void +g_part_orphan(struct g_consumer *cp) +{ + struct g_provider *pp; + + pp = cp->provider; + KASSERT(pp != NULL, (__func__)); + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); + g_topology_assert(); + + KASSERT(pp->error != 0, (__func__)); + g_part_wither(cp->geom, pp->error); +} + +static void +g_part_spoiled(struct g_consumer *cp) +{ + + G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); + g_topology_assert(); + + g_part_wither(cp->geom, ENXIO); +} + +static void +g_part_start(struct bio *bp) +{ + struct bio *bp2; + struct g_consumer *cp; + struct g_geom *gp; + struct g_part_entry *entry; + struct g_part_table *table; + struct g_kerneldump *gkd; + struct g_provider *pp; + int attr; + + pp = bp->bio_to; + gp = pp->geom; + table = gp->softc; + cp = LIST_FIRST(&gp->consumer); + + G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, + pp->name)); + + entry = pp->private; + if (entry == NULL) { + g_io_deliver(bp, ENXIO); + return; + } + + switch(bp->bio_cmd) { + case BIO_DELETE: + case BIO_READ: + case BIO_WRITE: + if (bp->bio_offset >= pp->mediasize) { + g_io_deliver(bp, EIO); + return; + } + bp2 = g_clone_bio(bp); + if (bp2 == NULL) { + g_io_deliver(bp, ENOMEM); + return; + } + if (bp2->bio_offset + bp2->bio_length > pp->mediasize) + bp2->bio_length = 
pp->mediasize - bp2->bio_offset; + bp2->bio_done = g_std_done; + bp2->bio_offset += entry->gpe_offset; + g_io_request(bp2, cp); + return; + case BIO_FLUSH: + break; + case BIO_GETATTR: + if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { + /* + * Check that the partition is suitable for kernel + * dumps. Typically only swap partitions should be + * used. + */ + if (!G_PART_DUMPTO(table, entry)) { + g_io_deliver(bp, ENXIO); + return; + } + gkd = (struct g_kerneldump *)bp->bio_data; + if (gkd->offset >= pp->mediasize) { + g_io_deliver(bp, EIO); + return; + } + if (gkd->offset + gkd->length > pp->mediasize) + gkd->length = pp->mediasize - gkd->offset; + gkd->offset += entry->gpe_offset; + } else if (!strcmp("PART::isleaf", bp->bio_attribute)) { + if (bp->bio_length != sizeof(int)) { + g_io_deliver(bp, EFAULT); + return; + } + attr = table->gpt_isleaf ? 1 : 0; + bcopy(&attr, bp->bio_data, sizeof(int)); + bp->bio_completed = sizeof(int); + g_io_deliver(bp, 0); + return; + } else if (!strcmp("PART::depth", bp->bio_attribute)) { + if (bp->bio_length != sizeof(int)) { + g_io_deliver(bp, EFAULT); + return; + } + bcopy(&table->gpt_depth, bp->bio_data, sizeof(int)); + bp->bio_completed = sizeof(int); + g_io_deliver(bp, 0); + return; + } + break; + default: + g_io_deliver(bp, EOPNOTSUPP); + return; + } + + bp2 = g_clone_bio(bp); + if (bp2 == NULL) { + g_io_deliver(bp, ENOMEM); + return; + } + bp2->bio_done = g_std_done; + g_io_request(bp2, cp); +} diff --git a/sys/geom/part/g_part.h b/sys/geom/part/g_part.h new file mode 100644 index 0000000..bd697b7 --- /dev/null +++ b/sys/geom/part/g_part.h @@ -0,0 +1,130 @@ +/*- + * Copyright (c) 2006, 2007 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _GEOM_PART_H_ +#define _GEOM_PART_H_ + +#define G_PART_TRACE(args) g_trace args + +#define G_PART_PROBE_PRI_LOW -10 +#define G_PART_PROBE_PRI_NORM -5 +#define G_PART_PROBE_PRI_HIGH 0 + +enum g_part_alias { + G_PART_ALIAS_EFI, /* A EFI system partition entry. */ + G_PART_ALIAS_FREEBSD, /* A BSD labeled partition entry. */ + G_PART_ALIAS_FREEBSD_SWAP, /* A swap partition entry. */ + G_PART_ALIAS_FREEBSD_UFS, /* A UFS/UFS2 file system entry. */ + G_PART_ALIAS_FREEBSD_VINUM, /* A Vinum partition entry. */ + G_PART_ALIAS_MBR, /* A MBR (extended) partition entry. */ + /* Keep the following last */ + G_PART_ALIAS_COUNT +}; + +const char *g_part_alias_name(enum g_part_alias); + +/* G_PART scheme (KOBJ class). 
*/ +struct g_part_scheme { + KOBJ_CLASS_FIELDS; + size_t gps_entrysz; + int gps_minent; + int gps_maxent; +}; +#define G_PART_SCHEME_DECLARE(s) DATA_SET(g_part_scheme_set, s) + +struct g_part_entry { + LIST_ENTRY(g_part_entry) gpe_entry; + struct g_provider *gpe_pp; /* Corresponding provider. */ + off_t gpe_offset; /* Byte offset. */ + quad_t gpe_start; /* First LBA of partition. */ + quad_t gpe_end; /* Last LBA of partition. */ + int gpe_index; + int gpe_created:1; /* Entry is newly created. */ + int gpe_deleted:1; /* Entry has been deleted. */ + int gpe_modified:1; /* Entry has been modified. */ +}; + +/* G_PART table (KOBJ instance). */ +struct g_part_table { + KOBJ_FIELDS; + struct g_part_scheme *gpt_scheme; + struct g_geom *gpt_gp; + LIST_HEAD(, g_part_entry) gpt_entry; + quad_t gpt_first; /* First allocatable LBA */ + quad_t gpt_last; /* Last allocatable LBA */ + int gpt_entries; + /* + * gpt_smhead and gpt_smtail are bitmaps representing the first + * 32 sectors on the disk (gpt_smhead) and the last 32 sectors + * on the disk (gpt_smtail). These maps are used by the commit + * verb to clear sectors previously used by a scheme after the + * partitioning scheme has been destroyed. + */ + uint32_t gpt_smhead; + uint32_t gpt_smtail; + + int gpt_depth; /* Sub-partitioning level. */ + int gpt_isleaf:1; /* Cannot be sub-partitioned. */ + int gpt_created:1; /* Newly created. */ + int gpt_modified:1; /* Table changes have been made. */ + int gpt_opened:1; /* Permissions obtained. */ +}; + +struct g_part_entry *g_part_new_entry(struct g_part_table *, int, quad_t, + quad_t); + +/* G_PART ctlreq parameters. 
*/ +#define G_PART_PARM_ENTRIES 0x0001 +#define G_PART_PARM_FLAGS 0x0002 +#define G_PART_PARM_GEOM 0x0004 +#define G_PART_PARM_INDEX 0x0008 +#define G_PART_PARM_LABEL 0x0010 +#define G_PART_PARM_PROVIDER 0x0020 +#define G_PART_PARM_REQUEST 0x0040 +#define G_PART_PARM_RESPONSE 0x0080 +#define G_PART_PARM_SCHEME 0x0100 +#define G_PART_PARM_SIZE 0x0200 +#define G_PART_PARM_START 0x0400 +#define G_PART_PARM_TYPE 0x0800 + +struct g_part_parms { + unsigned int gpp_parms; + unsigned int gpp_entries; + const char *gpp_flags; + struct g_geom *gpp_geom; + unsigned int gpp_index; + const char *gpp_label; + struct g_provider *gpp_provider; + const char *gpp_request; + struct g_part_scheme *gpp_scheme; + quad_t gpp_size; + quad_t gpp_start; + const char *gpp_type; +}; + +#endif /* !_GEOM_PART_H_ */ diff --git a/sys/geom/part/g_part_apm.c b/sys/geom/part/g_part_apm.c new file mode 100644 index 0000000..f90fc65 --- /dev/null +++ b/sys/geom/part/g_part_apm.c @@ -0,0 +1,414 @@ +/*- + * Copyright (c) 2006, 2007 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "g_part_if.h" + +struct g_part_apm_table { + struct g_part_table base; + struct apm_ddr ddr; + struct apm_ent self; +}; + +struct g_part_apm_entry { + struct g_part_entry base; + struct apm_ent ent; +}; + +static int g_part_apm_add(struct g_part_table *, struct g_part_entry *, + struct g_part_parms *); +static int g_part_apm_create(struct g_part_table *, struct g_part_parms *); +static int g_part_apm_destroy(struct g_part_table *, struct g_part_parms *); +static int g_part_apm_dumpto(struct g_part_table *, struct g_part_entry *); +static int g_part_apm_modify(struct g_part_table *, struct g_part_entry *, + struct g_part_parms *); +static char *g_part_apm_name(struct g_part_table *, struct g_part_entry *, + char *, size_t); +static int g_part_apm_probe(struct g_part_table *, struct g_consumer *); +static int g_part_apm_read(struct g_part_table *, struct g_consumer *); +static const char *g_part_apm_type(struct g_part_table *, struct g_part_entry *, + char *, size_t); +static int g_part_apm_write(struct g_part_table *, struct g_consumer *); + +static kobj_method_t g_part_apm_methods[] = { + KOBJMETHOD(g_part_add, g_part_apm_add), + KOBJMETHOD(g_part_create, g_part_apm_create), + KOBJMETHOD(g_part_destroy, g_part_apm_destroy), + 
KOBJMETHOD(g_part_dumpto, g_part_apm_dumpto), + KOBJMETHOD(g_part_modify, g_part_apm_modify), + KOBJMETHOD(g_part_name, g_part_apm_name), + KOBJMETHOD(g_part_probe, g_part_apm_probe), + KOBJMETHOD(g_part_read, g_part_apm_read), + KOBJMETHOD(g_part_type, g_part_apm_type), + KOBJMETHOD(g_part_write, g_part_apm_write), + { 0, 0 } +}; + +static struct g_part_scheme g_part_apm_scheme = { + "APM", + g_part_apm_methods, + sizeof(struct g_part_apm_table), + .gps_entrysz = sizeof(struct g_part_apm_entry), + .gps_minent = 16, + .gps_maxent = INT_MAX, +}; +G_PART_SCHEME_DECLARE(g_part_apm_scheme); + +static int +apm_parse_type(const char *type, char *buf, size_t bufsz) +{ + + if (type[0] != '@') { + if (strlen(type) > bufsz) + return (EINVAL); + if (!strcmp(type, APM_ENT_TYPE_SELF) || + !strcmp(type, APM_ENT_TYPE_UNUSED)) + return (EINVAL); + strncpy(buf, type, bufsz); + return (0); + } + if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_FREEBSD))) + strcpy(buf, APM_ENT_TYPE_FREEBSD); + else if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_FREEBSD_SWAP))) + strcpy(buf, APM_ENT_TYPE_FREEBSD_SWAP); + else if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_FREEBSD_UFS))) + strcpy(buf, APM_ENT_TYPE_FREEBSD_UFS); + else if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_FREEBSD_VINUM))) + strcpy(buf, APM_ENT_TYPE_FREEBSD_VINUM); + else + return (EINVAL); + return (0); +} + +static int +apm_read_ent(struct g_consumer *cp, uint32_t blk, struct apm_ent *ent) +{ + struct g_provider *pp; + char *buf; + int error; + + pp = cp->provider; + buf = g_read_data(cp, pp->sectorsize * blk, pp->sectorsize, &error); + if (buf == NULL) + return (error); + ent->ent_sig = be16dec(buf); + ent->ent_pmblkcnt = be32dec(buf + 4); + ent->ent_start = be32dec(buf + 8); + ent->ent_size = be32dec(buf + 12); + bcopy(buf + 16, ent->ent_name, sizeof(ent->ent_name)); + bcopy(buf + 48, ent->ent_type, sizeof(ent->ent_type)); + g_free(buf); + return (0); +} + +static int +g_part_apm_add(struct g_part_table *basetable, 
struct g_part_entry *baseentry, + struct g_part_parms *gpp) +{ + struct g_part_apm_entry *entry; + struct g_part_apm_table *table; + int error; + + entry = (struct g_part_apm_entry *)baseentry; + table = (struct g_part_apm_table *)basetable; + entry->ent.ent_sig = APM_ENT_SIG; + entry->ent.ent_pmblkcnt = table->self.ent_pmblkcnt; + entry->ent.ent_start = gpp->gpp_start; + entry->ent.ent_size = gpp->gpp_size; + if (baseentry->gpe_deleted) { + bzero(entry->ent.ent_type, sizeof(entry->ent.ent_type)); + bzero(entry->ent.ent_name, sizeof(entry->ent.ent_name)); + } + error = apm_parse_type(gpp->gpp_type, entry->ent.ent_type, + sizeof(entry->ent.ent_type)); + if (error) + return (error); + if (gpp->gpp_parms & G_PART_PARM_LABEL) { + if (strlen(gpp->gpp_label) > sizeof(entry->ent.ent_name)) + return (EINVAL); + strncpy(entry->ent.ent_name, gpp->gpp_label, + sizeof(entry->ent.ent_name)); + } + return (0); +} + +static int +g_part_apm_create(struct g_part_table *basetable, struct g_part_parms *gpp) +{ + struct g_provider *pp; + struct g_part_apm_table *table; + + table = (struct g_part_apm_table *)basetable; + pp = gpp->gpp_provider; + if (pp->sectorsize != 512 || + pp->mediasize < (2 + 2 * basetable->gpt_entries) * pp->sectorsize) + return (ENOSPC); + + basetable->gpt_first = 2 + basetable->gpt_entries; + basetable->gpt_last = (pp->mediasize / pp->sectorsize) - 1; + + table->ddr.ddr_sig = APM_DDR_SIG; + table->ddr.ddr_blksize = pp->sectorsize; + table->ddr.ddr_blkcount = basetable->gpt_last + 1; + + table->self.ent_sig = APM_ENT_SIG; + table->self.ent_pmblkcnt = basetable->gpt_entries + 1; + table->self.ent_start = 1; + table->self.ent_size = table->self.ent_pmblkcnt; + strcpy(table->self.ent_name, "Apple"); + strcpy(table->self.ent_type, APM_ENT_TYPE_SELF); + return (0); +} + +static int +g_part_apm_destroy(struct g_part_table *basetable, struct g_part_parms *gpp) +{ + + /* Wipe the first 2 sectors to clear the partitioning. 
*/ + basetable->gpt_smhead |= 3; + return (0); +} + +static int +g_part_apm_dumpto(struct g_part_table *table, struct g_part_entry *baseentry) +{ + struct g_part_apm_entry *entry; + + entry = (struct g_part_apm_entry *)baseentry; + return ((!strcmp(entry->ent.ent_type, APM_ENT_TYPE_FREEBSD_SWAP)) + ? 1 : 0); +} + +static int +g_part_apm_modify(struct g_part_table *basetable, + struct g_part_entry *baseentry, struct g_part_parms *gpp) +{ + struct g_part_apm_entry *entry; + int error; + + entry = (struct g_part_apm_entry *)baseentry; + if (gpp->gpp_parms & G_PART_PARM_LABEL) { + if (strlen(gpp->gpp_label) > sizeof(entry->ent.ent_name)) + return (EINVAL); + } + if (gpp->gpp_parms & G_PART_PARM_TYPE) { + error = apm_parse_type(gpp->gpp_type, entry->ent.ent_type, + sizeof(entry->ent.ent_type)); + if (error) + return (error); + } + if (gpp->gpp_parms & G_PART_PARM_LABEL) { + strncpy(entry->ent.ent_name, gpp->gpp_label, + sizeof(entry->ent.ent_name)); + } + return (0); +} + +static char * +g_part_apm_name(struct g_part_table *table, struct g_part_entry *baseentry, + char *buf, size_t bufsz) +{ + + snprintf(buf, bufsz, "s%d", baseentry->gpe_index + 1); + return (buf); +} + +static int +g_part_apm_probe(struct g_part_table *basetable, struct g_consumer *cp) +{ + struct g_provider *pp; + struct g_part_apm_table *table; + char *buf; + int error; + + /* We don't nest, which means that our depth should be 0. */ + if (basetable->gpt_depth != 0) + return (ENXIO); + + table = (struct g_part_apm_table *)basetable; + pp = cp->provider; + + /* Sanity-check the provider. */ + if (pp->mediasize < 4 * pp->sectorsize) + return (ENOSPC); + + /* Check that there's a Driver Descriptor Record (DDR). 
*/ + buf = g_read_data(cp, 0L, pp->sectorsize, &error); + if (buf == NULL) + return (error); + table->ddr.ddr_sig = be16dec(buf); + table->ddr.ddr_blksize = be16dec(buf + 2); + table->ddr.ddr_blkcount = be32dec(buf + 4); + g_free(buf); + if (table->ddr.ddr_sig != APM_DDR_SIG) + return (ENXIO); + if (table->ddr.ddr_blksize != pp->sectorsize) + return (ENXIO); + + /* Check that there's a Partition Map. */ + error = apm_read_ent(cp, 1, &table->self); + if (error) + return (error); + if (table->self.ent_sig != APM_ENT_SIG) + return (ENXIO); + if (strcmp(table->self.ent_type, APM_ENT_TYPE_SELF)) + return (ENXIO); + if (table->self.ent_pmblkcnt >= table->ddr.ddr_blkcount) + return (ENXIO); + return (G_PART_PROBE_PRI_NORM); +} + +static int +g_part_apm_read(struct g_part_table *basetable, struct g_consumer *cp) +{ + struct apm_ent ent; + struct g_part_apm_entry *entry; + struct g_part_apm_table *table; + int error, index; + + table = (struct g_part_apm_table *)basetable; + + basetable->gpt_first = table->self.ent_pmblkcnt + 1; + basetable->gpt_last = table->ddr.ddr_blkcount - 1; + basetable->gpt_entries = table->self.ent_pmblkcnt - 1; + + for (index = table->self.ent_pmblkcnt - 1; index > 0; index--) { + error = apm_read_ent(cp, index + 1, &ent); + if (error) + continue; + if (!strcmp(ent.ent_type, APM_ENT_TYPE_UNUSED)) + continue; + entry = (struct g_part_apm_entry *)g_part_new_entry(basetable, + index, ent.ent_start, ent.ent_start + ent.ent_size - 1); + entry->ent = ent; + } + + return (0); +} + +static const char * +g_part_apm_type(struct g_part_table *basetable, struct g_part_entry *baseentry, + char *buf, size_t bufsz) +{ + struct g_part_apm_entry *entry; + const char *type; + size_t len; + + entry = (struct g_part_apm_entry *)baseentry; + type = entry->ent.ent_type; + if (!strcmp(type, APM_ENT_TYPE_FREEBSD)) + return (g_part_alias_name(G_PART_ALIAS_FREEBSD)); + if (!strcmp(type, APM_ENT_TYPE_FREEBSD_SWAP)) + return (g_part_alias_name(G_PART_ALIAS_FREEBSD_SWAP)); + 
if (!strcmp(type, APM_ENT_TYPE_FREEBSD_UFS)) + return (g_part_alias_name(G_PART_ALIAS_FREEBSD_UFS)); + if (!strcmp(type, APM_ENT_TYPE_FREEBSD_VINUM)) + return (g_part_alias_name(G_PART_ALIAS_FREEBSD_VINUM)); + len = MIN(sizeof(entry->ent.ent_type), bufsz - 1); + bcopy(type, buf, len); + buf[len] = '\0'; + return (buf); +} + +static int +g_part_apm_write(struct g_part_table *basetable, struct g_consumer *cp) +{ + char buf[512]; + struct g_part_entry *baseentry; + struct g_part_apm_entry *entry; + struct g_part_apm_table *table; + int error, index; + + table = (struct g_part_apm_table *)basetable; + bzero(buf, sizeof(buf)); + + /* Write the DDR and 'self' entry only when we're newly created. */ + if (basetable->gpt_created) { + be16enc(buf, table->ddr.ddr_sig); + be16enc(buf + 2, table->ddr.ddr_blksize); + be32enc(buf + 4, table->ddr.ddr_blkcount); + error = g_write_data(cp, 0, buf, sizeof(buf)); + if (error) + return (error); + } + + be16enc(buf, table->self.ent_sig); + be16enc(buf + 2, 0); + be32enc(buf + 4, table->self.ent_pmblkcnt); + + if (basetable->gpt_created) { + be32enc(buf + 8, table->self.ent_start); + be32enc(buf + 12, table->self.ent_size); + bcopy(table->self.ent_name, buf + 16, + sizeof(table->self.ent_name)); + bcopy(table->self.ent_type, buf + 48, + sizeof(table->self.ent_type)); + error = g_write_data(cp, 512, buf, sizeof(buf)); + if (error) + return (error); + } + + baseentry = LIST_FIRST(&basetable->gpt_entry); + for (index = 1; index <= basetable->gpt_entries; index++) { + if (baseentry != NULL && index == baseentry->gpe_index) { + entry = (struct g_part_apm_entry *)baseentry; + be32enc(buf + 8, entry->ent.ent_start); + be32enc(buf + 12, entry->ent.ent_size); + bcopy(entry->ent.ent_name, buf + 16, + sizeof(entry->ent.ent_name)); + bcopy(entry->ent.ent_type, buf + 48, + sizeof(entry->ent.ent_type)); + baseentry = LIST_NEXT(baseentry, gpe_entry); + } else { + bzero(buf + 8, 4 + 4 + 32 + 32); + strcpy(buf + 48, APM_ENT_TYPE_UNUSED); + } + error = 
g_write_data(cp, (index + 1) * 512, buf, sizeof(buf)); + if (error) + return (error); + } + + return (0); +} diff --git a/sys/geom/part/g_part_gpt.c b/sys/geom/part/g_part_gpt.c new file mode 100644 index 0000000..f8370a5 --- /dev/null +++ b/sys/geom/part/g_part_gpt.c @@ -0,0 +1,736 @@ +/*- + * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "g_part_if.h" + +CTASSERT(offsetof(struct gpt_hdr, padding) == 92); +CTASSERT(sizeof(struct gpt_ent) == 128); + +#define EQUUID(a,b) (memcmp(a, b, sizeof(struct uuid)) == 0) + +enum gpt_elt { + GPT_ELT_PRIHDR, + GPT_ELT_PRITBL, + GPT_ELT_SECHDR, + GPT_ELT_SECTBL, + GPT_ELT_COUNT +}; + +enum gpt_state { + GPT_STATE_UNKNOWN, /* Not determined. */ + GPT_STATE_MISSING, /* No signature found. */ + GPT_STATE_CORRUPT, /* Checksum mismatch. */ + GPT_STATE_INVALID, /* Nonconformant/invalid. */ + GPT_STATE_OK /* Perfectly fine. */ +}; + +struct g_part_gpt_table { + struct g_part_table base; + struct gpt_hdr hdr; + quad_t lba[GPT_ELT_COUNT]; + enum gpt_state state[GPT_ELT_COUNT]; +}; + +struct g_part_gpt_entry { + struct g_part_entry base; + struct gpt_ent ent; +}; + +static int g_part_gpt_add(struct g_part_table *, struct g_part_entry *, + struct g_part_parms *); +static int g_part_gpt_create(struct g_part_table *, struct g_part_parms *); +static int g_part_gpt_destroy(struct g_part_table *, struct g_part_parms *); +static int g_part_gpt_dumpto(struct g_part_table *, struct g_part_entry *); +static int g_part_gpt_modify(struct g_part_table *, struct g_part_entry *, + struct g_part_parms *); +static char *g_part_gpt_name(struct g_part_table *, struct g_part_entry *, + char *, size_t); +static int g_part_gpt_probe(struct g_part_table *, struct g_consumer *); +static int g_part_gpt_read(struct g_part_table *, struct g_consumer *); +static const char *g_part_gpt_type(struct g_part_table *, struct g_part_entry *, + char *, size_t); +static int g_part_gpt_write(struct g_part_table *, struct g_consumer *); + +static kobj_method_t g_part_gpt_methods[] = { + KOBJMETHOD(g_part_add, g_part_gpt_add), + KOBJMETHOD(g_part_create, g_part_gpt_create), + 
KOBJMETHOD(g_part_destroy, g_part_gpt_destroy), + KOBJMETHOD(g_part_dumpto, g_part_gpt_dumpto), + KOBJMETHOD(g_part_modify, g_part_gpt_modify), + KOBJMETHOD(g_part_name, g_part_gpt_name), + KOBJMETHOD(g_part_probe, g_part_gpt_probe), + KOBJMETHOD(g_part_read, g_part_gpt_read), + KOBJMETHOD(g_part_type, g_part_gpt_type), + KOBJMETHOD(g_part_write, g_part_gpt_write), + { 0, 0 } +}; + +static struct g_part_scheme g_part_gpt_scheme = { + "GPT", + g_part_gpt_methods, + sizeof(struct g_part_gpt_table), + .gps_entrysz = sizeof(struct g_part_gpt_entry), + .gps_minent = 128, + .gps_maxent = INT_MAX, +}; +G_PART_SCHEME_DECLARE(g_part_gpt_scheme); + +static struct uuid gpt_uuid_efi = GPT_ENT_TYPE_EFI; +static struct uuid gpt_uuid_freebsd = GPT_ENT_TYPE_FREEBSD; +static struct uuid gpt_uuid_freebsd_swap = GPT_ENT_TYPE_FREEBSD_SWAP; +static struct uuid gpt_uuid_freebsd_ufs = GPT_ENT_TYPE_FREEBSD_UFS; +static struct uuid gpt_uuid_freebsd_vinum = GPT_ENT_TYPE_FREEBSD_VINUM; +static struct uuid gpt_uuid_linux_swap = GPT_ENT_TYPE_LINUX_SWAP; +static struct uuid gpt_uuid_mbr = GPT_ENT_TYPE_MBR; +static struct uuid gpt_uuid_unused = GPT_ENT_TYPE_UNUSED; + +static void +gpt_read_hdr(struct g_part_gpt_table *table, struct g_consumer *cp, + enum gpt_elt elt, struct gpt_hdr *hdr) +{ + struct uuid uuid; + struct g_provider *pp; + char *buf; + quad_t lba, last; + int error; + uint32_t crc, sz; + + pp = cp->provider; + last = (pp->mediasize / pp->sectorsize) - 1; + table->lba[elt] = (elt == GPT_ELT_PRIHDR) ? 
1 : last; + table->state[elt] = GPT_STATE_MISSING; + buf = g_read_data(cp, table->lba[elt] * pp->sectorsize, pp->sectorsize, + &error); + if (buf == NULL) + return; + bcopy(buf, hdr, sizeof(*hdr)); + if (memcmp(hdr->hdr_sig, GPT_HDR_SIG, sizeof(hdr->hdr_sig)) != 0) + return; + + table->state[elt] = GPT_STATE_CORRUPT; + sz = le32toh(hdr->hdr_size); + if (sz < 92 || sz > pp->sectorsize) + return; + crc = le32toh(hdr->hdr_crc_self); + hdr->hdr_crc_self = 0; + if (crc32(hdr, sz) != crc) + return; + hdr->hdr_size = sz; + hdr->hdr_crc_self = crc; + + table->state[elt] = GPT_STATE_INVALID; + hdr->hdr_revision = le32toh(hdr->hdr_revision); + if (hdr->hdr_revision < 0x00010000) + return; + hdr->hdr_lba_self = le64toh(hdr->hdr_lba_self); + if (hdr->hdr_lba_self != table->lba[elt]) + return; + hdr->hdr_lba_alt = le64toh(hdr->hdr_lba_alt); + + /* Check the managed area. */ + hdr->hdr_lba_start = le64toh(hdr->hdr_lba_start); + if (hdr->hdr_lba_start < 2 || hdr->hdr_lba_start >= last) + return; + hdr->hdr_lba_end = le64toh(hdr->hdr_lba_end); + if (hdr->hdr_lba_end < hdr->hdr_lba_start || hdr->hdr_lba_end >= last) + return; + + /* Check the table location and size of the table. 
*/ + hdr->hdr_entries = le32toh(hdr->hdr_entries); + hdr->hdr_entsz = le32toh(hdr->hdr_entsz); + if (hdr->hdr_entries == 0 || hdr->hdr_entsz < 128 || + (hdr->hdr_entsz & 7) != 0) + return; + hdr->hdr_lba_table = le64toh(hdr->hdr_lba_table); + if (hdr->hdr_lba_table < 2 || hdr->hdr_lba_table >= last) + return; + if (hdr->hdr_lba_table >= hdr->hdr_lba_start && + hdr->hdr_lba_table <= hdr->hdr_lba_end) + return; + lba = hdr->hdr_lba_table + + (hdr->hdr_entries * hdr->hdr_entsz + pp->sectorsize - 1) / + pp->sectorsize - 1; + if (lba >= last) + return; + if (lba >= hdr->hdr_lba_start && lba <= hdr->hdr_lba_end) + return; + + table->state[elt] = GPT_STATE_OK; + le_uuid_dec(&hdr->hdr_uuid, &uuid); + hdr->hdr_uuid = uuid; + hdr->hdr_crc_table = le32toh(hdr->hdr_crc_table); +} + +static struct gpt_ent * +gpt_read_tbl(struct g_part_gpt_table *table, struct g_consumer *cp, + enum gpt_elt elt, struct gpt_hdr *hdr) +{ + struct g_provider *pp; + struct gpt_ent *ent, *tbl; + char *buf, *p; + unsigned int idx, sectors, tblsz; + int error; + uint16_t ch; + + pp = cp->provider; + table->lba[elt] = hdr->hdr_lba_table; + + table->state[elt] = GPT_STATE_MISSING; + tblsz = hdr->hdr_entries * hdr->hdr_entsz; + sectors = (tblsz + pp->sectorsize - 1) / pp->sectorsize; + buf = g_read_data(cp, table->lba[elt] * pp->sectorsize, + sectors * pp->sectorsize, &error); + if (buf == NULL) + return (NULL); + + table->state[elt] = GPT_STATE_CORRUPT; + if (crc32(buf, tblsz) != hdr->hdr_crc_table) { + g_free(buf); + return (NULL); + } + + table->state[elt] = GPT_STATE_OK; + tbl = g_malloc(hdr->hdr_entries * sizeof(struct gpt_ent), + M_WAITOK | M_ZERO); + + for (idx = 0, ent = tbl, p = buf; + idx < hdr->hdr_entries; + idx++, ent++, p += hdr->hdr_entsz) { + le_uuid_dec(p, &ent->ent_type); + le_uuid_dec(p + 16, &ent->ent_uuid); + ent->ent_lba_start = le64dec(p + 32); + ent->ent_lba_end = le64dec(p + 40); + ent->ent_attr = le64dec(p + 48); + for (ch = 0; ch < sizeof(ent->ent_name)/2; ch++) + 
ent->ent_name[ch] = le16dec(p + 56 + ch * 2); + } + + g_free(buf); + return (tbl); +} + +static int +gpt_matched_hdrs(struct gpt_hdr *pri, struct gpt_hdr *sec) +{ + + if (!EQUUID(&pri->hdr_uuid, &sec->hdr_uuid)) + return (0); + return ((pri->hdr_revision == sec->hdr_revision && + pri->hdr_size == sec->hdr_size && + pri->hdr_lba_start == sec->hdr_lba_start && + pri->hdr_lba_end == sec->hdr_lba_end && + pri->hdr_entries == sec->hdr_entries && + pri->hdr_entsz == sec->hdr_entsz && + pri->hdr_crc_table == sec->hdr_crc_table) ? 1 : 0); +} + +static int +gpt_parse_type(const char *type, struct uuid *uuid) +{ + struct uuid tmp; + int error; + + if (type[0] != '@') { + error = parse_uuid(type, &tmp); + if (error) + return (error); + if (EQUUID(&tmp, &gpt_uuid_unused)) + return (EINVAL); + *uuid = tmp; + return (0); + } + if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_EFI))) + *uuid = gpt_uuid_efi; + else if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_FREEBSD))) + *uuid = gpt_uuid_freebsd; + else if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_FREEBSD_SWAP))) + *uuid = gpt_uuid_freebsd_swap; + else if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_FREEBSD_UFS))) + *uuid = gpt_uuid_freebsd_ufs; + else if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_FREEBSD_VINUM))) + *uuid = gpt_uuid_freebsd_vinum; + else if (!strcmp(type, g_part_alias_name(G_PART_ALIAS_MBR))) + *uuid = gpt_uuid_mbr; + else + return (EINVAL); + return (0); +} + +static int +g_part_gpt_add(struct g_part_table *basetable, struct g_part_entry *baseentry, + struct g_part_parms *gpp) +{ + struct g_part_gpt_entry *entry; + int error; + + entry = (struct g_part_gpt_entry *)baseentry; + error = gpt_parse_type(gpp->gpp_type, &entry->ent.ent_type); + if (error) + return (error); + kern_uuidgen(&entry->ent.ent_uuid, 1); + entry->ent.ent_lba_start = baseentry->gpe_start; + entry->ent.ent_lba_end = baseentry->gpe_end; + if (baseentry->gpe_deleted) { + entry->ent.ent_attr = 0; + bzero(entry->ent.ent_name, 
sizeof(entry->ent.ent_name)); + } + /* XXX label */ + return (0); +} + +static int +g_part_gpt_create(struct g_part_table *basetable, struct g_part_parms *gpp) +{ + struct g_provider *pp; + struct g_part_gpt_table *table; + quad_t last; + size_t tblsz; + + table = (struct g_part_gpt_table *)basetable; + pp = gpp->gpp_provider; + tblsz = (basetable->gpt_entries * sizeof(struct gpt_ent) + + pp->sectorsize - 1) / pp->sectorsize; + if (pp->sectorsize < 512 || + pp->mediasize < (3 + 2 * tblsz + basetable->gpt_entries) * + pp->sectorsize) + return (ENOSPC); + + last = (pp->mediasize / pp->sectorsize) - 1; + + table->lba[GPT_ELT_PRIHDR] = 1; + table->lba[GPT_ELT_PRITBL] = 2; + table->lba[GPT_ELT_SECHDR] = last; + table->lba[GPT_ELT_SECTBL] = last - tblsz; + + bcopy(GPT_HDR_SIG, table->hdr.hdr_sig, sizeof(table->hdr.hdr_sig)); + table->hdr.hdr_revision = GPT_HDR_REVISION; + table->hdr.hdr_size = offsetof(struct gpt_hdr, padding); + table->hdr.hdr_lba_start = 2 + tblsz; + table->hdr.hdr_lba_end = last - tblsz - 1; + kern_uuidgen(&table->hdr.hdr_uuid, 1); + table->hdr.hdr_entries = basetable->gpt_entries; + table->hdr.hdr_entsz = sizeof(struct gpt_ent); + + basetable->gpt_first = table->hdr.hdr_lba_start; + basetable->gpt_last = table->hdr.hdr_lba_end; + return (0); +} + +static int +g_part_gpt_destroy(struct g_part_table *basetable, struct g_part_parms *gpp) +{ + + /* + * Wipe the first 2 sectors as well as the last to clear the + * partitioning. + */ + basetable->gpt_smhead |= 3; + basetable->gpt_smtail |= 1; + return (0); +} + +static int +g_part_gpt_dumpto(struct g_part_table *table, struct g_part_entry *baseentry) +{ + struct g_part_gpt_entry *entry; + + entry = (struct g_part_gpt_entry *)baseentry; + return ((EQUUID(&entry->ent.ent_type, &gpt_uuid_freebsd_swap) || + EQUUID(&entry->ent.ent_type, &gpt_uuid_linux_swap)) ? 
1 : 0); +} + +static int +g_part_gpt_modify(struct g_part_table *basetable, + struct g_part_entry *baseentry, struct g_part_parms *gpp) +{ + struct g_part_gpt_entry *entry; + int error; + + entry = (struct g_part_gpt_entry *)baseentry; + error = gpt_parse_type(gpp->gpp_type, &entry->ent.ent_type); + if (error) + return (error); + /* XXX label */ + return (0); +} + +static char * +g_part_gpt_name(struct g_part_table *table, struct g_part_entry *baseentry, + char *buf, size_t bufsz) +{ + struct g_part_gpt_entry *entry; + char c; + + entry = (struct g_part_gpt_entry *)baseentry; + c = (EQUUID(&entry->ent.ent_type, &gpt_uuid_freebsd)) ? 's' : 'p'; + snprintf(buf, bufsz, "%c%d", c, baseentry->gpe_index); + return (buf); +} + +static int +g_part_gpt_probe(struct g_part_table *table, struct g_consumer *cp) +{ + struct g_provider *pp; + char *buf; + int error, res; + + /* We don't nest, which means that our depth should be 0. */ + if (table->gpt_depth != 0) + return (ENXIO); + + pp = cp->provider; + + /* + * Sanity-check the provider. Since the first sector on the provider + * must be a PMBR and a PMBR is 512 bytes large, the sector size + * must be at least 512 bytes. Also, since the theoretical minimum + * number of sectors needed by GPT is 6, any medium that has less + * than 6 sectors is never going to be able to hold a GPT. The + * number 6 comes from: + * 1 sector for the PMBR + * 2 sectors for the GPT headers (each 1 sector) + * 2 sectors for the GPT tables (each 1 sector) + * 1 sector for an actual partition + * It's better to catch this pathological case early than behaving + * pathologically later on... + */ + if (pp->sectorsize < 512 || pp->mediasize < 6 * pp->sectorsize) + return (ENOSPC); + + /* Check that there's a MBR. */ + buf = g_read_data(cp, 0L, pp->sectorsize, &error); + if (buf == NULL) + return (error); + res = le16dec(buf + DOSMAGICOFFSET); + g_free(buf); + if (res != DOSMAGIC) + return (ENXIO); + + /* Check that there's a primary header. 
*/ + buf = g_read_data(cp, pp->sectorsize, pp->sectorsize, &error); + if (buf == NULL) + return (error); + res = memcmp(buf, GPT_HDR_SIG, 8); + g_free(buf); + if (res == 0) + return (G_PART_PROBE_PRI_HIGH); + + /* No primary? Check that there's a secondary. */ + buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, + &error); + if (buf == NULL) + return (error); + res = memcmp(buf, GPT_HDR_SIG, 8); + g_free(buf); + return ((res == 0) ? G_PART_PROBE_PRI_HIGH : ENXIO); +} + +static int +g_part_gpt_read(struct g_part_table *basetable, struct g_consumer *cp) +{ + struct gpt_hdr prihdr, sechdr; + struct gpt_ent *tbl, *pritbl, *sectbl; + struct g_provider *pp; + struct g_part_gpt_table *table; + struct g_part_gpt_entry *entry; + int index; + + table = (struct g_part_gpt_table *)basetable; + pp = cp->provider; + + /* Read the primary header and table. */ + gpt_read_hdr(table, cp, GPT_ELT_PRIHDR, &prihdr); + if (table->state[GPT_ELT_PRIHDR] == GPT_STATE_OK) { + pritbl = gpt_read_tbl(table, cp, GPT_ELT_PRITBL, &prihdr); + } else { + table->state[GPT_ELT_PRITBL] = GPT_STATE_MISSING; + pritbl = NULL; + } + + /* Read the secondary header and table. */ + gpt_read_hdr(table, cp, GPT_ELT_SECHDR, &sechdr); + if (table->state[GPT_ELT_SECHDR] == GPT_STATE_OK) { + sectbl = gpt_read_tbl(table, cp, GPT_ELT_SECTBL, &sechdr); + } else { + table->state[GPT_ELT_SECTBL] = GPT_STATE_MISSING; + sectbl = NULL; + } + + /* Fail if we haven't got any good tables at all. */ + if (table->state[GPT_ELT_PRITBL] != GPT_STATE_OK && + table->state[GPT_ELT_SECTBL] != GPT_STATE_OK) { + printf("GEOM: %s: corrupt or invalid GPT detected.\n", + pp->name); + printf("GEOM: %s: GPT rejected -- may not be recoverable.\n", + pp->name); + return (EINVAL); + } + + /* + * If both headers are good but they disagree with each other, + * then invalidate one. We prefer to keep the primary header, + * unless the primary table is corrupt. 
+ */ + if (table->state[GPT_ELT_PRIHDR] == GPT_STATE_OK && + table->state[GPT_ELT_SECHDR] == GPT_STATE_OK && + !gpt_matched_hdrs(&prihdr, &sechdr)) { + if (table->state[GPT_ELT_PRITBL] == GPT_STATE_OK) + table->state[GPT_ELT_SECHDR] = GPT_STATE_INVALID; + else + table->state[GPT_ELT_PRIHDR] = GPT_STATE_INVALID; + } + + if (table->state[GPT_ELT_PRIHDR] != GPT_STATE_OK) { + printf("GEOM: %s: the primary GPT table is corrupt or " + "invalid.\n", pp->name); + printf("GEOM: %s: using the secondary instead -- recovery " + "strongly advised.\n", pp->name); + table->hdr = sechdr; + tbl = sectbl; + if (pritbl != NULL) + g_free(pritbl); + } else { + if (table->state[GPT_ELT_SECHDR] != GPT_STATE_OK) { + printf("GEOM: %s: the secondary GPT table is corrupt " + "or invalid.\n", pp->name); + printf("GEOM: %s: using the primary only -- recovery " + "suggested.\n", pp->name); + } + table->hdr = prihdr; + tbl = pritbl; + if (sectbl != NULL) + g_free(sectbl); + } + + basetable->gpt_first = table->hdr.hdr_lba_start; + basetable->gpt_last = table->hdr.hdr_lba_end; + basetable->gpt_entries = table->hdr.hdr_entries; + + for (index = basetable->gpt_entries - 1; index >= 0; index--) { + if (EQUUID(&tbl[index].ent_type, &gpt_uuid_unused)) + continue; + entry = (struct g_part_gpt_entry *)g_part_new_entry(basetable, + index+1, tbl[index].ent_lba_start, tbl[index].ent_lba_end); + entry->ent = tbl[index]; + } + + g_free(tbl); + return (0); +} + +static const char * +g_part_gpt_type(struct g_part_table *basetable, struct g_part_entry *baseentry, + char *buf, size_t bufsz) +{ + struct g_part_gpt_entry *entry; + struct uuid *type; + + entry = (struct g_part_gpt_entry *)baseentry; + type = &entry->ent.ent_type; + if (EQUUID(type, &gpt_uuid_efi)) + return (g_part_alias_name(G_PART_ALIAS_EFI)); + if (EQUUID(type, &gpt_uuid_freebsd)) + return (g_part_alias_name(G_PART_ALIAS_FREEBSD)); + if (EQUUID(type, &gpt_uuid_freebsd_swap)) + return (g_part_alias_name(G_PART_ALIAS_FREEBSD_SWAP)); + if 
(EQUUID(type, &gpt_uuid_freebsd_ufs)) + return (g_part_alias_name(G_PART_ALIAS_FREEBSD_UFS)); + if (EQUUID(type, &gpt_uuid_freebsd_vinum)) + return (g_part_alias_name(G_PART_ALIAS_FREEBSD_VINUM)); + if (EQUUID(type, &gpt_uuid_mbr)) + return (g_part_alias_name(G_PART_ALIAS_MBR)); + snprintf_uuid(buf, bufsz, type); + return (buf); +} + +static int +g_part_gpt_write(struct g_part_table *basetable, struct g_consumer *cp) +{ + unsigned char *buf, *bp; + struct g_provider *pp; + struct g_part_entry *baseentry; + struct g_part_gpt_entry *entry; + struct g_part_gpt_table *table; + size_t tlbsz; + uint32_t crc; + int error, index; + + pp = cp->provider; + table = (struct g_part_gpt_table *)basetable; + tlbsz = (table->hdr.hdr_entries * table->hdr.hdr_entsz + + pp->sectorsize - 1) / pp->sectorsize; + + if (basetable->gpt_created) { + buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); + le16enc(buf + DOSMAGICOFFSET, DOSMAGIC); + buf[DOSPARTOFF + 1] = 0xff; /* shd */ + buf[DOSPARTOFF + 2] = 0xff; /* ssect */ + buf[DOSPARTOFF + 3] = 0xff; /* scyl */ + buf[DOSPARTOFF + 4] = 0xee; /* typ */ + buf[DOSPARTOFF + 5] = 0xff; /* ehd */ + buf[DOSPARTOFF + 6] = 0xff; /* esect */ + buf[DOSPARTOFF + 7] = 0xff; /* ecyl */ + le32enc(buf + DOSPARTOFF + 8, 1); /* start */ + le32enc(buf + DOSPARTOFF + 12, + MIN(pp->mediasize / pp->sectorsize - 1, 0xffffffffLL)); + error = g_write_data(cp, 0, buf, pp->sectorsize); + g_free(buf); + if (error) + return (error); + } + + /* Allocate space for the header and entries. 
*/ + buf = g_malloc((tlbsz + 1) * pp->sectorsize, M_WAITOK | M_ZERO); + + memcpy(buf, table->hdr.hdr_sig, sizeof(table->hdr.hdr_sig)); + le32enc(buf + 8, table->hdr.hdr_revision); + le32enc(buf + 12, table->hdr.hdr_size); + le64enc(buf + 40, table->hdr.hdr_lba_start); + le64enc(buf + 48, table->hdr.hdr_lba_end); + le_uuid_enc(buf + 56, &table->hdr.hdr_uuid); + le32enc(buf + 80, table->hdr.hdr_entries); + le32enc(buf + 84, table->hdr.hdr_entsz); + + LIST_FOREACH(baseentry, &basetable->gpt_entry, gpe_entry) { + entry = (struct g_part_gpt_entry *)baseentry; + index = baseentry->gpe_index - 1; + bp = buf + pp->sectorsize + table->hdr.hdr_entsz * index; + le_uuid_enc(bp, &entry->ent.ent_type); + le_uuid_enc(bp + 16, &entry->ent.ent_uuid); + le64enc(bp + 32, entry->ent.ent_lba_start); + le64enc(bp + 40, entry->ent.ent_lba_end); + le64enc(bp + 48, entry->ent.ent_attr); + memcpy(bp + 56, entry->ent.ent_name, + sizeof(entry->ent.ent_name)); + } + + crc = crc32(buf + pp->sectorsize, + table->hdr.hdr_entries * table->hdr.hdr_entsz); + le32enc(buf + 88, crc); + + /* Write primary meta-data. */ + le32enc(buf + 16, 0); /* hdr_crc_self. */ + le64enc(buf + 24, table->lba[GPT_ELT_PRIHDR]); /* hdr_lba_self. */ + le64enc(buf + 32, table->lba[GPT_ELT_SECHDR]); /* hdr_lba_alt. */ + le64enc(buf + 72, table->lba[GPT_ELT_PRITBL]); /* hdr_lba_table. */ + crc = crc32(buf, table->hdr.hdr_size); + le32enc(buf + 16, crc); + + error = g_write_data(cp, table->lba[GPT_ELT_PRITBL] * pp->sectorsize, + buf + pp->sectorsize, tlbsz * pp->sectorsize); + if (error) + goto out; + error = g_write_data(cp, table->lba[GPT_ELT_PRIHDR] * pp->sectorsize, + buf, pp->sectorsize); + if (error) + goto out; + + /* Write secondary meta-data. */ + le32enc(buf + 16, 0); /* hdr_crc_self. */ + le64enc(buf + 24, table->lba[GPT_ELT_SECHDR]); /* hdr_lba_self. */ + le64enc(buf + 32, table->lba[GPT_ELT_PRIHDR]); /* hdr_lba_alt. */ + le64enc(buf + 72, table->lba[GPT_ELT_SECTBL]); /* hdr_lba_table. 
*/ + crc = crc32(buf, table->hdr.hdr_size); + le32enc(buf + 16, crc); + + error = g_write_data(cp, table->lba[GPT_ELT_SECTBL] * pp->sectorsize, + buf + pp->sectorsize, tlbsz * pp->sectorsize); + if (error) + goto out; + error = g_write_data(cp, table->lba[GPT_ELT_SECHDR] * pp->sectorsize, + buf, pp->sectorsize); + + out: + g_free(buf); + return (error); +} + +#if 0 +static void +g_gpt_to_utf8(struct sbuf *sb, uint16_t *str, size_t len) +{ + u_int bo; + uint32_t ch; + uint16_t c; + + bo = BYTE_ORDER; + while (len > 0 && *str != 0) { + ch = (bo == BIG_ENDIAN) ? be16toh(*str) : le16toh(*str); + str++, len--; + if ((ch & 0xf800) == 0xd800) { + if (len > 0) { + c = (bo == BIG_ENDIAN) ? be16toh(*str) + : le16toh(*str); + str++, len--; + } else + c = 0xfffd; + if ((ch & 0x400) == 0 && (c & 0xfc00) == 0xdc00) { + ch = ((ch & 0x3ff) << 10) + (c & 0x3ff); + ch += 0x10000; + } else + ch = 0xfffd; + } else if (ch == 0xfffe) { /* BOM (U+FEFF) swapped. */ + bo = (bo == BIG_ENDIAN) ? LITTLE_ENDIAN : BIG_ENDIAN; + continue; + } else if (ch == 0xfeff) /* BOM (U+FEFF) unswapped. */ + continue; + + if (ch < 0x80) + sbuf_printf(sb, "%c", ch); + else if (ch < 0x800) + sbuf_printf(sb, "%c%c", 0xc0 | (ch >> 6), + 0x80 | (ch & 0x3f)); + else if (ch < 0x10000) + sbuf_printf(sb, "%c%c%c", 0xe0 | (ch >> 12), + 0x80 | ((ch >> 6) & 0x3f), 0x80 | (ch & 0x3f)); + else if (ch < 0x200000) + sbuf_printf(sb, "%c%c%c%c", 0xf0 | (ch >> 18), + 0x80 | ((ch >> 12) & 0x3f), + 0x80 | ((ch >> 6) & 0x3f), 0x80 | (ch & 0x3f)); + } +} +#endif diff --git a/sys/geom/part/g_part_if.m b/sys/geom/part/g_part_if.m new file mode 100644 index 0000000..589db64 --- /dev/null +++ b/sys/geom/part/g_part_if.m @@ -0,0 +1,117 @@ +#- +# Copyright (c) 2006, 2007 Marcel Moolenaar +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# $FreeBSD$ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +# The G_PART scheme interface. + +INTERFACE g_part; + +# add() - scheme specific processing for the add verb. +METHOD int add { + struct g_part_table *table; + struct g_part_entry *entry; + struct g_part_parms *gpp; +}; + +# create() - scheme specific processing for the create verb. +METHOD int create { + struct g_part_table *table; + struct g_part_parms *gpp; +}; + +# destroy() - scheme specific processing for the destroy verb. +METHOD int destroy { + struct g_part_table *table; + struct g_part_parms *gpp; +}; + +# dumpconf() +METHOD void dumpconf { + struct g_part_table *table; + struct g_part_entry *entry; + struct sbuf *sb; + const char *indent; +}; + +# dumpto() - return whether the partiton can be used for kernel dumps. 
+METHOD int dumpto { + struct g_part_table *table; + struct g_part_entry *entry; +}; + +# modify() - scheme specific processing for the modify verb. +METHOD int modify { + struct g_part_table *table; + struct g_part_entry *entry; + struct g_part_parms *gpp; +}; + +# name() - return the name of the given partition entry. +# Typical names are "p1", "s0" or "c". +METHOD const char * name { + struct g_part_table *table; + struct g_part_entry *entry; + char *buf; + size_t bufsz; +}; + +# probe() - probe the provider attached to the given consumer for the +# existence of the scheme implemented by the G_PART interface handler. +METHOD int probe { + struct g_part_table *table; + struct g_consumer *cp; +}; + +# read() - read the on-disk partition table into memory. +METHOD int read { + struct g_part_table *table; + struct g_consumer *cp; +}; + +# type() - return a string representation of the partition type. +# Preferrably, the alias names. +METHOD const char * type { + struct g_part_table *table; + struct g_part_entry *entry; + char *buf; + size_t bufsz; +}; + +# write() - write the in-memory partition table to disk. +METHOD int write { + struct g_part_table *table; + struct g_consumer *cp; +}; -- cgit v1.1