/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 *	from: i386 pmap.h,v 1.54 1997/11/20 19:30:35 bde Exp
 * $FreeBSD: release/9.1.0/sys/ia64/include/pmap.h 223873 2011-07-08 16:30:54Z marcel $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/atomic.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#ifdef _KERNEL
#define	MAXKPT		(PAGE_SIZE/sizeof(vm_offset_t))
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;

struct md_page {
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
	vm_memattr_t		memattr;
};

struct pmap {
	struct mtx		pm_mtx;
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	uint32_t		pm_rid[IA64_VM_MINKERN_REGION];
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
} *pv_entry_t;

#ifdef _KERNEL
extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

extern uint64_t pmap_vhpt_base[];
extern int pmap_vhpt_log2size;

#define	pmap_page_get_memattr(m)	((m)->md.memattr)
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_mapbios(pa, sz)	pmap_mapdev(pa, sz)
#define	pmap_unmapbios(va, sz)	pmap_unmapdev(va, sz)

vm_offset_t pmap_alloc_vhpt(void);
void	pmap_bootstrap(void);
void	pmap_kenter(vm_offset_t va, vm_offset_t pa);
vm_paddr_t pmap_kextract(vm_offset_t va);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	pmap_page_set_memattr(vm_page_t, vm_memattr_t);
vm_offset_t pmap_page_to_va(vm_page_t);
vm_offset_t pmap_steal_memory(vm_size_t);
struct pmap *pmap_switch(struct pmap *pmap);
void	pmap_unmapdev(vm_offset_t, vm_size_t);

#endif /* _KERNEL */

#endif /* !_MACHINE_PMAP_H_ */
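
/*
 * Illustrative sketch (an addition, not part of the original header):
 * how the vtophys() convenience macro above is typically used from
 * kernel code to translate a kernel virtual address into a physical
 * address via pmap_kextract().  The variable names "buf" and "pa" are
 * hypothetical; the macro only works for addresses that have a kernel
 * mapping, since pmap_kextract() consults the kernel pmap.
 *
 *	void *buf = ...;		// wired kernel memory
 *	vm_paddr_t pa;
 *
 *	pa = vtophys(buf);		// physical address backing buf
 *
 * User virtual addresses must instead be resolved through the owning
 * process's pmap.
 */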
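
/*
 * Illustrative sketch (an addition, not part of the original header):
 * walking the per-page pv_list declared above under the PMAP_LOCK()
 * protocol.  print_page_mappings() is a hypothetical helper, and the
 * sketch assumes the caller already holds whatever lock stabilizes the
 * page's pv list; real pmap code layers trylock/retry logic on top of
 * this basic pattern.
 *
 *	static void
 *	print_page_mappings(vm_page_t m)
 *	{
 *		pv_entry_t pv;
 *
 *		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 *			PMAP_LOCK(pv->pv_pmap);		// pin this mapping
 *			printf("va %#lx in pmap %p\n",
 *			    (u_long)pv->pv_va, pv->pv_pmap);
 *			PMAP_UNLOCK(pv->pv_pmap);
 *		}
 *	}
 */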