diff --git a/sys/amd64/amd64/elf_machdep.c b/sys/amd64/amd64/elf_machdep.c index ca07adc..c854ecd 100644 --- a/sys/amd64/amd64/elf_machdep.c +++ b/sys/amd64/amd64/elf_machdep.c @@ -72,7 +72,8 @@ struct sysentvec elf64_freebsd_sysvec = { .sv_setregs = exec_setregs, .sv_fixlimit = NULL, .sv_maxssiz = NULL, - .sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP | SV_TIMEKEEP, + .sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP | SV_TIMEKEEP | + SV_ASLR, .sv_set_syscall_retval = cpu_set_syscall_retval, .sv_fetch_syscall_args = cpu_fetch_syscall_args, .sv_syscallnames = syscallnames, diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c index bdcdf6f..23d372e 100644 --- a/sys/compat/freebsd32/freebsd32_misc.c +++ b/sys/compat/freebsd32/freebsd32_misc.c @@ -3026,6 +3026,7 @@ freebsd32_procctl(struct thread *td, struct freebsd32_procctl_args *uap) switch (uap->com) { case PROC_SPROTECT: case PROC_TRACE_CTL: + case PROC_ASLR_CTL: error = copyin(PTRIN(uap->data), &flags, sizeof(flags)); if (error != 0) return (error); @@ -3055,6 +3056,7 @@ freebsd32_procctl(struct thread *td, struct freebsd32_procctl_args *uap) data = &x.rk; break; case PROC_TRACE_STATUS: + case PROC_ASLR_STATUS: data = &flags; break; default: @@ -3073,6 +3075,7 @@ freebsd32_procctl(struct thread *td, struct freebsd32_procctl_args *uap) error = error1; break; case PROC_TRACE_STATUS: + case PROC_ASLR_STATUS: if (error == 0) error = copyout(&flags, uap->data, sizeof(flags)); break; diff --git a/sys/compat/ia32/ia32_sysvec.c b/sys/compat/ia32/ia32_sysvec.c index f201570..abf26a8 100644 --- a/sys/compat/ia32/ia32_sysvec.c +++ b/sys/compat/ia32/ia32_sysvec.c @@ -120,11 +120,9 @@ struct sysentvec ia32_freebsd_sysvec = { .sv_setregs = ia32_setregs, .sv_fixlimit = ia32_fixlimit, .sv_maxssiz = &ia32_maxssiz, - .sv_flags = SV_ABI_FREEBSD | SV_IA32 | SV_ILP32 | + .sv_flags = SV_ABI_FREEBSD | SV_IA32 | SV_ILP32 #ifdef __amd64__ - SV_SHP | SV_TIMEKEEP -#else - 0 + | SV_SHP | SV_TIMEKEEP | 
SV_ASLR #endif , .sv_set_syscall_retval = ia32_set_syscall_retval, diff --git a/sys/i386/i386/elf_machdep.c b/sys/i386/i386/elf_machdep.c index 3c76ab2..3fa2d09 100644 --- a/sys/i386/i386/elf_machdep.c +++ b/sys/i386/i386/elf_machdep.c @@ -79,7 +79,7 @@ struct sysentvec elf32_freebsd_sysvec = { .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = SV_ABI_FREEBSD | SV_IA32 | SV_ILP32 | SV_SHP | - SV_TIMEKEEP, + SV_TIMEKEEP | SV_ASLR, .sv_set_syscall_retval = cpu_set_syscall_retval, .sv_fetch_syscall_args = cpu_fetch_syscall_args, .sv_syscallnames = syscallnames, diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index 43d4800..be053c9 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -137,6 +137,21 @@ SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0, #endif #endif +static int __elfN(aslr_enabled) = 1; +SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, + aslr_enabled, CTLFLAG_RWTUN, &__elfN(aslr_enabled), 0, + __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable aslr"); + +static int __elfN(pie_aslr_enabled) = 1; +SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, + pie_aslr_enabled, CTLFLAG_RWTUN, &__elfN(pie_aslr_enabled), 0, + __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable aslr for PIE binaries"); + +static int __elfN(aslr_care_sbrk) = 1; +SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, + aslr_care_sbrk, CTLFLAG_RW, &__elfN(aslr_care_sbrk), 0, + __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used"); + static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; #define trunc_page_ps(va, ps) ((va) & ~(ps - 1)) @@ -453,10 +468,11 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, * The mapping is not page aligned. This means we have * to copy the data. Sigh. 
*/ - rv = vm_map_find(map, NULL, 0, &start, end - start, 0, - VMFS_NO_SPACE, prot | VM_PROT_WRITE, VM_PROT_ALL, - 0); - if (rv) + vm_map_lock(map); + rv = vm_map_insert(map, NULL, 0, start, end, + prot | VM_PROT_WRITE, VM_PROT_ALL, 0); + vm_map_unlock(map); + if (rv != KERN_SUCCESS) return (rv); if (object == NULL) return (KERN_SUCCESS); @@ -471,9 +487,8 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, sz); vm_imgact_unmap_page(sf); - if (error) { + if (error != 0) return (KERN_FAILURE); - } offset += sz; } rv = KERN_SUCCESS; @@ -755,6 +770,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) const Elf_Phdr *phdr; Elf_Auxargs *elf_auxargs; struct vmspace *vmspace; + vm_map_t map; const char *err_str, *newinterp; char *interp, *interp_buf, *path; Elf_Brandinfo *brand_info; @@ -762,6 +778,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) vm_prot_t prot; u_long text_size, data_size, total_size, text_addr, data_addr; u_long seg_size, seg_addr, addr, baddr, et_dyn_addr, entry, proghdr; + u_long rbase, maxalign, mapsz, minv, maxv; int32_t osrel; int error, i, n, interp_name_len, have_interp; @@ -803,12 +820,17 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) err_str = newinterp = NULL; interp = interp_buf = NULL; td = curthread; + maxalign = PAGE_SIZE; + mapsz = 0; for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: if (n == 0) baddr = phdr[i].p_vaddr; + if (phdr[i].p_align > maxalign) + maxalign = phdr[i].p_align; + mapsz += phdr[i].p_memsz; n++; break; case PT_INTERP: @@ -862,6 +884,8 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) error = ENOEXEC; goto ret; } + sv = brand_info->sysvec; + et_dyn_addr = 0; if (hdr->e_type == ET_DYN) { if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { uprintf("Cannot execute shared object\n"); @@ -872,13 +896,17 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) 
 * Honour the base load address from the dso if it is * non-zero for some reason. */ - if (baddr == 0) - et_dyn_addr = ET_DYN_LOAD_ADDR; - else - et_dyn_addr = 0; - } else - et_dyn_addr = 0; - sv = brand_info->sysvec; + if (baddr == 0) { + if ((sv->sv_flags & SV_ASLR) == 0) + et_dyn_addr = ET_DYN_LOAD_ADDR; + else if ((__elfN(pie_aslr_enabled) && + (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) || + (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0) + et_dyn_addr = 1; + else + et_dyn_addr = ET_DYN_LOAD_ADDR; + } + } if (interp != NULL && brand_info->interp_newpath != NULL) newinterp = brand_info->interp_newpath; @@ -897,6 +925,42 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) error = exec_new_vmspace(imgp, sv); imgp->proc->p_sysent = sv; + vmspace = imgp->proc->p_vmspace; + map = &vmspace->vm_map; + + if ((sv->sv_flags & SV_ASLR) == 0 || + (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0) { + KASSERT(et_dyn_addr != 1, ("et_dyn_addr == 1 and !ASLR")); + } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 || + (__elfN(aslr_enabled) && hdr->e_type != ET_DYN) || + et_dyn_addr == 1) { + vm_map_lock(map); + map->flags |= MAP_ASLR; + /* + * If user does not care about sbrk, utilize the bss + * grow region for mappings as well. We can select + * the base for the image anywhere and still not suffer + * from the fragmentation. 
+ */ + if (!__elfN(aslr_care_sbrk) || + (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) + map->flags |= MAP_ASLR_IGNSTART; + vm_map_unlock(map); + } + if (et_dyn_addr == 1) { + KASSERT((map->flags & MAP_ASLR) != 0, + ("et_dyn_addr but !MAP_ASLR")); + rbase = arc4random(); +#if __ELF_WORD_SIZE == 64 + rbase |= ((u_long)arc4random()) << 32; +#endif + minv = vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA); + maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK); + et_dyn_addr = vm_map_min(map) + + /* +1 reserves half of the address space to interpreter.*/ + rbase % ((maxv - minv) >> (fls(maxalign) + 1)); + et_dyn_addr <<= fls(maxalign); + } vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); if (error != 0) @@ -989,7 +1053,6 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) goto ret; } - vmspace = imgp->proc->p_vmspace; vmspace->vm_tsize = text_size >> PAGE_SHIFT; vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; vmspace->vm_dsize = data_size >> PAGE_SHIFT; diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 94e139e..2aa1614 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -497,7 +497,8 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread * * Increase reference counts on shared objects. */ p2->p_flag = P_INMEM; - p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC); + p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | + P2_ASLR_ENABLE | P2_ASLR_DISABLE | P2_ASLR_IGNSTART); p2->p_swtick = ticks; if (p1->p_flag & P_PROFIL) startprofclock(p2); diff --git a/sys/kern/kern_procctl.c b/sys/kern/kern_procctl.c index 8ef72901..9247ccc 100644 --- a/sys/kern/kern_procctl.c +++ b/sys/kern/kern_procctl.c @@ -1,6 +1,6 @@ /*- * Copyright (c) 2014 John Baldwin - * Copyright (c) 2014 The FreeBSD Foundation + * Copyright (c) 2014-2016 The FreeBSD Foundation * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. 
@@ -43,6 +43,10 @@ __FBSDID("$FreeBSD$"); #include #include +#include +#include +#include + static int protect_setchild(struct thread *td, struct proc *p, int flags) { @@ -336,6 +340,52 @@ trace_status(struct thread *td, struct proc *p, int *data) return (0); } +static int +aslr_ctl(struct thread *td, struct proc *p, int state) +{ + + PROC_LOCK_ASSERT(p, MA_OWNED); + + switch (state) { + case PROC_ASLR_FORCE_ENABLE: + p->p_flag2 &= ~P2_ASLR_DISABLE; + p->p_flag2 |= P2_ASLR_ENABLE; + break; + case PROC_ASLR_FORCE_DISABLE: + p->p_flag2 |= P2_ASLR_DISABLE; + p->p_flag2 &= ~P2_ASLR_ENABLE; + break; + case PROC_ASLR_NOFORCE: + p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE); + break; + default: + return (EINVAL); + } + return (0); +} + +static int +aslr_status(struct thread *td, struct proc *p, int *data) +{ + int d; + + switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) { + case 0: + d = PROC_ASLR_NOFORCE; + break; + case P2_ASLR_ENABLE: + d = PROC_ASLR_FORCE_ENABLE; + break; + case P2_ASLR_DISABLE: + d = PROC_ASLR_FORCE_DISABLE; + break; + } + if ((p->p_vmspace->vm_map.flags & MAP_ASLR) != 0) + d |= PROC_ASLR_ACTIVE; + *data = d; + return (0); +} + #ifndef _SYS_SYSPROTO_H_ struct procctl_args { idtype_t idtype; @@ -359,6 +409,7 @@ sys_procctl(struct thread *td, struct procctl_args *uap) switch (uap->com) { case PROC_SPROTECT: case PROC_TRACE_CTL: + case PROC_ASLR_CTL: error = copyin(uap->data, &flags, sizeof(flags)); if (error != 0) return (error); @@ -386,6 +437,7 @@ sys_procctl(struct thread *td, struct procctl_args *uap) data = &x.rk; break; case PROC_TRACE_STATUS: + case PROC_ASLR_STATUS: data = &flags; break; default: @@ -403,6 +455,7 @@ sys_procctl(struct thread *td, struct procctl_args *uap) error = error1; break; case PROC_TRACE_STATUS: + case PROC_ASLR_STATUS: if (error == 0) error = copyout(&flags, uap->data, sizeof(flags)); break; @@ -432,6 +485,10 @@ kern_procctl_single(struct thread *td, struct proc *p, int com, void *data) return (trace_ctl(td, 
p, *(int *)data)); case PROC_TRACE_STATUS: return (trace_status(td, p, data)); + case PROC_ASLR_CTL: + return (aslr_ctl(td, p, *(int *)data)); + case PROC_ASLR_STATUS: + return (aslr_status(td, p, data)); default: return (EINVAL); } @@ -452,6 +509,8 @@ kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data) case PROC_REAP_GETPIDS: case PROC_REAP_KILL: case PROC_TRACE_STATUS: + case PROC_ASLR_CTL: + case PROC_ASLR_STATUS: if (idtype != P_PID) return (EINVAL); } @@ -471,6 +530,8 @@ kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data) tree_locked = true; break; case PROC_TRACE_STATUS: + case PROC_ASLR_CTL: + case PROC_ASLR_STATUS: tree_locked = false; break; default: diff --git a/sys/sys/proc.h b/sys/sys/proc.h index f9ca6d9..e7de97b 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -699,6 +699,9 @@ struct proc { #define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on exec(2). */ #define P2_AST_SU 0x00000008 /* Handles SU ast for kthreads. */ #define P2_LWP_EVENTS 0x00000010 /* Report LWP events via ptrace(2). */ +#define P2_ASLR_ENABLE 0x00000020 /* Force enable ASLR. */ +#define P2_ASLR_DISABLE 0x00000040 /* Force disable ASLR. */ +#define P2_ASLR_IGNSTART 0x00000080 /* Enable ASLR to consume sbrk area. */ /* Flags protected by proctree_lock, kept in p_treeflags. */ #define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */ diff --git a/sys/sys/procctl.h b/sys/sys/procctl.h index 75dbf53..4a62669 100644 --- a/sys/sys/procctl.h +++ b/sys/sys/procctl.h @@ -43,6 +43,8 @@ #define PROC_REAP_KILL 6 /* kill descendants */ #define PROC_TRACE_CTL 7 /* en/dis ptrace and coredumps */ #define PROC_TRACE_STATUS 8 /* query tracing status */ +#define PROC_ASLR_CTL 9 /* en/dis ASLR */ +#define PROC_ASLR_STATUS 10 /* query ASLR status */ /* Operations for PROC_SPROTECT (passed in integer arg). 
*/ #define PPROT_OP(x) ((x) & 0xf) @@ -102,6 +104,11 @@ struct procctl_reaper_kill { #define PROC_TRACE_CTL_DISABLE 2 #define PROC_TRACE_CTL_DISABLE_EXEC 3 +#define PROC_ASLR_FORCE_ENABLE 1 +#define PROC_ASLR_FORCE_DISABLE 2 +#define PROC_ASLR_NOFORCE 3 +#define PROC_ASLR_ACTIVE 0x80000000 + #ifndef _KERNEL __BEGIN_DECLS int procctl(idtype_t, id_t, int, void *); diff --git a/sys/sys/sysent.h b/sys/sys/sysent.h index a79ff04..c9bd29a 100644 --- a/sys/sys/sysent.h +++ b/sys/sys/sysent.h @@ -138,7 +138,8 @@ struct sysentvec { #define SV_AOUT 0x008000 /* a.out executable. */ #define SV_SHP 0x010000 /* Shared page. */ #define SV_CAPSICUM 0x020000 /* Force cap_enter() on startup. */ -#define SV_TIMEKEEP 0x040000 +#define SV_TIMEKEEP 0x040000 /* Shared page timehands. */ +#define SV_ASLR 0x080000 /* ASLR allowed. */ #define SV_ABI_MASK 0xff #define SV_PROC_FLAG(p, x) ((p)->p_sysent->sv_flags & (x)) diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 5e22b3e..e347ae9 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -1470,6 +1470,20 @@ vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, return (result); } +static const int aslr_pages_rnd_64[2] = {0x1000, 0x10}; +static const int aslr_pages_rnd_32[2] = {0x100, 0x4}; + +static int aslr_sloppiness = 5; +SYSCTL_INT(_vm, OID_AUTO, aslr_sloppiness, CTLFLAG_RW, &aslr_sloppiness, 0, + ""); + +static int aslr_collapse_anon = 1; +SYSCTL_INT(_vm, OID_AUTO, aslr_collapse_anon, CTLFLAG_RW, + &aslr_collapse_anon, 0, + ""); + +#define MAP_32BIT_MAX_ADDR ((vm_offset_t)1 << 31) + /* * vm_map_find finds an unallocated region in the target address * map with the given length. 
The search is defined to be @@ -1485,8 +1499,10 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_size_t length, vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, int cow) { - vm_offset_t alignment, initial_addr, start; - int result; + vm_map_entry_t prev_entry; + vm_offset_t alignment, initial_addr, start, rand_max, rs, re; + const int *aslr_pages_rnd; + int result, do_aslr, pidx, anon; KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || object == NULL, @@ -1499,8 +1515,34 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, alignment = (vm_offset_t)1 << (find_space >> 8); } else alignment = 0; - initial_addr = *addr; + do_aslr = (map->flags & MAP_ASLR) != 0 ? aslr_sloppiness : 0; + anon = object == NULL && (cow & (MAP_INHERIT_SHARE | + MAP_STACK_GROWS_UP | MAP_STACK_GROWS_DOWN)) == 0 && + prot != PROT_NONE && aslr_collapse_anon; + if (do_aslr) { + if (vm_map_max(map) > MAP_32BIT_MAX_ADDR && + (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR)) + aslr_pages_rnd = aslr_pages_rnd_64; + else + aslr_pages_rnd = aslr_pages_rnd_32; + if (find_space != VMFS_NO_SPACE && (map->flags & + MAP_ASLR_IGNSTART) != 0) { + initial_addr = anon ? map->anon_loc : vm_map_min(map); + } else { + initial_addr = anon && *addr == 0 ? map->anon_loc : + *addr; + } + } else { + initial_addr = *addr; + } again: + if (anon && do_aslr == aslr_sloppiness - 1) { + /* + * A try at anon_loc location failed, do free pass + * from the start of the map. + */ + initial_addr = vm_map_min(map); + } start = initial_addr; vm_map_lock(map); do { @@ -1508,12 +1550,46 @@ again: if (vm_map_findspace(map, start, length, addr) || (max_addr != 0 && *addr + length > max_addr)) { vm_map_unlock(map); + if (do_aslr > 0) { + do_aslr--; + goto again; + } if (find_space == VMFS_OPTIMAL_SPACE) { find_space = VMFS_ANY_SPACE; goto again; } return (KERN_NO_SPACE); } + /* + * The R step for ASLR. 
But skip it if we are + * trying to coalesce anon memory request. + */ + if (do_aslr > 0 && + !(anon && do_aslr == aslr_sloppiness)) { + vm_map_lookup_entry(map, *addr, &prev_entry); + if (MAXPAGESIZES > 1 && pagesizes[1] != 0 && + (find_space == VMFS_SUPER_SPACE || + find_space == VMFS_OPTIMAL_SPACE)) + pidx = 1; + else + pidx = 0; + re = prev_entry->next == &map->header ? + map->max_offset : prev_entry->next->start; + rs = prev_entry == &map->header ? + map->min_offset : prev_entry->end; + rand_max = re - (*addr - rs) - length; + if (max_addr != 0 && + *addr + rand_max > max_addr) + rand_max = max_addr - *addr - length; + rand_max /= pagesizes[pidx]; + if (rand_max < aslr_pages_rnd[pidx]) { + vm_map_unlock(map); + do_aslr--; + goto again; + } + *addr += (arc4random() % rand_max) * + pagesizes[pidx]; + } switch (find_space) { case VMFS_SUPER_SPACE: case VMFS_OPTIMAL_SPACE: @@ -1529,7 +1605,6 @@ again: } break; } - start = *addr; } if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { @@ -1539,8 +1614,15 @@ again: result = vm_map_insert(map, object, offset, start, start + length, prot, max, cow); } + if (result != KERN_SUCCESS && do_aslr > 0) { + vm_map_unlock(map); + do_aslr--; + goto again; + } } while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE && find_space != VMFS_ANY_SPACE); + if (result == KERN_SUCCESS && anon) + map->anon_loc = *addr + length; vm_map_unlock(map); return (result); } diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h index 2c0a4ad..ffc9fd8 100644 --- a/sys/vm/vm_map.h +++ b/sys/vm/vm_map.h @@ -190,6 +190,7 @@ struct vm_map { pmap_t pmap; /* (c) Physical map */ #define min_offset header.start /* (c) */ #define max_offset header.end /* (c) */ + vm_offset_t anon_loc; int busy; }; @@ -198,6 +199,8 @@ struct vm_map { */ #define MAP_WIREFUTURE 0x01 /* wire all future pages */ #define MAP_BUSY_WAKEUP 0x02 +#define MAP_ASLR 0x04 /* enabled ASLR */ +#define MAP_ASLR_IGNSTART 0x08 #ifdef _KERNEL static __inline vm_offset_t