/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_subr.c	8.5 (Berkeley) 3/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: release/9.1.0/sys/ufs/ffs/ffs_subr.c 207141 2010-04-24 07:05:35Z jeff $");

#include <sys/param.h>

#ifndef _KERNEL
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#else
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/fs.h>

#ifdef KDB
void	ffs_checkoverlap(struct buf *, struct inode *);
#endif

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "ip".  If "res" is non-zero, fill it in with a pointer to the
 * remaining space in the directory.
 */
int
ffs_blkatoff(vp, offset, res, bpp)
	struct vnode *vp;
	off_t offset;
	char **res;
	struct buf **bpp;
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ip->i_fs;
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}
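/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * the directory-lookup code typically consumes ffs_blkatoff().  On success
 * the caller owns the returned buffer and must brelse() it; "res" points at
 * the requested offset within the buffer.  The function and variable names
 * below are hypothetical, the caller is assumed to hold the vnode lock, and
 * the block is guarded with #if 0 so it is never compiled.
 */
#if 0
static int
example_read_dirblock(struct vnode *dvp, off_t offset)
{
	struct buf *bp;
	char *dirbuf;
	int error;

	error = ffs_blkatoff(dvp, offset, &dirbuf, &bp);
	if (error)
		return (error);
	/* ... scan directory entries starting at dirbuf ... */
	brelse(bp);		/* release the buffer when done */
	return (0);
}
#endif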
/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
void
ffs_load_inode(bp, ip, fs, ino)
	struct buf *bp;
	struct inode *ip;
	struct fs *fs;
	ino_t ino;
{

	if (ip->i_ump->um_fstype == UFS1) {
		*ip->i_din1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = ip->i_din1->di_mode;
		ip->i_nlink = ip->i_din1->di_nlink;
		ip->i_size = ip->i_din1->di_size;
		ip->i_flags = ip->i_din1->di_flags;
		ip->i_gen = ip->i_din1->di_gen;
		ip->i_uid = ip->i_din1->di_uid;
		ip->i_gid = ip->i_din1->di_gid;
	} else {
		*ip->i_din2 =
		    *((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = ip->i_din2->di_mode;
		ip->i_nlink = ip->i_din2->di_nlink;
		ip->i_size = ip->i_din2->di_size;
		ip->i_flags = ip->i_din2->di_flags;
		ip->i_gen = ip->i_din2->di_gen;
		ip->i_uid = ip->i_din2->di_uid;
		ip->i_gid = ip->i_din2->di_gid;
	}
}
#endif /* KERNEL */

/*
 * Update the frsum fields to reflect addition or deletion
 * of some frags.
 */
void
ffs_fragacct(fs, fragmap, fraglist, cnt)
	struct fs *fs;
	int fragmap;
	int32_t fraglist[];
	int cnt;
{
	int inblk;
	int field, subfield;
	int siz, pos;

	inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < fs->fs_frag; siz++) {
		if ((inblk & (1 << (siz + (fs->fs_frag % NBBY)))) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		for (pos = siz; pos <= fs->fs_frag; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}

#ifdef KDB
void
ffs_checkoverlap(bp, ip)
	struct buf *bp;
	struct inode *ip;
{
	struct buf *ebp, *ep;
	ufs2_daddr_t start, last;
	struct vnode *vp;

	ebp = &buf[nbuf];
	start = bp->b_blkno;
	last = start + btodb(bp->b_bcount) - 1;
	for (ep = buf; ep < ebp; ep++) {
		if (ep == bp || (ep->b_flags & B_INVAL) ||
		    ep->b_vp == NULLVP)
			continue;
		vp = ip->i_devvp;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
			continue;
		vprint("Disk overlap", vp);
		printf("\tstart %jd, end %jd overlap start %jd, end %jd\n",
		    (intmax_t)start, (intmax_t)last, (intmax_t)ep->b_blkno,
		    (intmax_t)(ep->b_blkno + btodb(ep->b_bcount) - 1));
		panic("ffs_checkoverlap: Disk buffer overlap");
	}
}
#endif /* KDB */

/*
 * block operations
 *
 * check if a block is available
 */
int
ffs_isblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;
	ufs1_daddr_t h;
{
	unsigned char mask;

	switch ((int)fs->fs_frag) {
	case 8:
		return (cp[h] == 0xff);
	case 4:
		mask = 0x0f << ((h & 0x1) << 2);
		return ((cp[h >> 1] & mask) == mask);
	case 2:
		mask = 0x03 << ((h & 0x3) << 1);
		return ((cp[h >> 2] & mask) == mask);
	case 1:
		mask = 0x01 << (h & 0x7);
		return ((cp[h >> 3] & mask) == mask);
	default:
#ifdef _KERNEL
		panic("ffs_isblock");
#endif
		break;
	}
	return (0);
}

/*
 * check if a block is free
 */
int
ffs_isfreeblock(fs, cp, h)
	struct fs *fs;
	u_char *cp;
	ufs1_daddr_t h;
{

	switch ((int)fs->fs_frag) {
	case 8:
		return (cp[h] == 0);
	case 4:
		return ((cp[h >> 1] & (0x0f << ((h & 0x1) << 2))) == 0);
	case 2:
		return ((cp[h >> 2] & (0x03 << ((h & 0x3) << 1))) == 0);
	case 1:
		return ((cp[h >> 3] & (0x01 << (h & 0x7))) == 0);
	default:
#ifdef _KERNEL
		panic("ffs_isfreeblock");
#endif
		break;
	}
	return (0);
}

/*
 * take a block out of the map
 */
void
ffs_clrblock(fs, cp, h)
	struct fs *fs;
	u_char *cp;
	ufs1_daddr_t h;
{

	switch ((int)fs->fs_frag) {
	case 8:
		cp[h] = 0;
		return;
	case 4:
		cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2));
		return;
	case 2:
		cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1));
		return;
	case 1:
		cp[h >> 3] &= ~(0x01 << (h & 0x7));
		return;
	default:
#ifdef _KERNEL
		panic("ffs_clrblock");
#endif
		break;
	}
}
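/*
 * Illustrative sketch (not part of the original file): the free map stores
 * fs_frag bits per full block, so with fs_frag == 4 block "h" occupies the
 * nibble (h & 0x1) of byte cp[h >> 1].  The helper below mirrors the
 * fs_frag == 4 case of ffs_isblock() above; the name is hypothetical and
 * the block is guarded with #if 0 so it is never compiled.
 */
#if 0
static int
example_isblock_frag4(const unsigned char *cp, ufs1_daddr_t h)
{
	unsigned char mask;

	mask = 0x0f << ((h & 0x1) << 2);	/* 4 map bits per block */
	return ((cp[h >> 1] & mask) == mask);	/* all 4 fragments free? */
}
#endif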
/*
 * put a block into the map
 */
void
ffs_setblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;
	ufs1_daddr_t h;
{

	switch ((int)fs->fs_frag) {
	case 8:
		cp[h] = 0xff;
		return;
	case 4:
		cp[h >> 1] |= (0x0f << ((h & 0x1) << 2));
		return;
	case 2:
		cp[h >> 2] |= (0x03 << ((h & 0x3) << 1));
		return;
	case 1:
		cp[h >> 3] |= (0x01 << (h & 0x7));
		return;
	default:
#ifdef _KERNEL
		panic("ffs_setblock");
#endif
		break;
	}
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	ufs1_daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}
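/*
 * Illustrative sketch (not part of the original file): the summary
 * arithmetic performed by ffs_clusteracct() above.  Freeing one block that
 * adjoins "back" free blocks behind it and "forw" free blocks ahead of it
 * creates a single run of length back + forw + 1 (capped at
 * fs_contigsumsize) and removes the two shorter runs from the counts; an
 * allocation (cnt == -1) reverses the adjustment.  The function name is
 * hypothetical and the block is guarded with #if 0 so it is never compiled.
 */
#if 0
static void
example_cluster_delta(int32_t *sump, int contigsumsize, int back, int forw,
    int cnt)	/* cnt == 1 on free, cnt == -1 on allocate */
{
	int i;

	i = back + forw + 1;
	if (i > contigsumsize)
		i = contigsumsize;
	sump[i] += cnt;			/* combined run gained (or lost) */
	if (back > 0)
		sump[back] -= cnt;	/* old run behind the block */
	if (forw > 0)
		sump[forw] -= cnt;	/* old run ahead of the block */
}
#endif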