/* monitor_mm.c revision 262566 */
/* $OpenBSD: monitor_mm.c,v 1.19 2014/01/04 17:50:55 tedu Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Simple first-fit allocator over a memory region obtained via xmmap()
 * (a mmap(2)-backed shared mapping on most platforms).  Bookkeeping is
 * kept in two red-black trees per master, both ordered by start address:
 * rb_free holds the unused ranges, rb_allocated the live ones.  A master
 * may itself allocate its bookkeeping nodes from another master
 * (mm->mmalloc), so the metadata can live inside shared memory too.
 * All error paths call fatal(); none of these functions return failure
 * except mm_malloc(), which returns NULL on exhaustion.
 */

#include "includes.h"

#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/param.h>
#include "openbsd-compat/sys-tree.h"

#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "xmalloc.h"
#include "ssh.h"
#include "log.h"
#include "monitor_mm.h"

/*
 * Total order on share entries by start address; used as the comparison
 * function for both RB trees.  Returns -1/0/1 like strcmp().
 */
static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	ptrdiff_t diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)

/*
 * Allocate a bookkeeping node describing [address, address+size) and
 * insert it into the given tree.  The node comes from the regular heap
 * when mm has no backing allocator, otherwise from that shared
 * allocator.  A duplicate start address is fatal (tree corruption).
 */
static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xcalloc(1, sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%zu)",
		    mm, tmp2, address, size);

	return (tmp);
}

/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xcalloc(1, sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%zu): %s", size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	/* Initially the whole region is one free range. */
	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}

/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	/*
	 * Fetch the successor before removing the current node, since
	 * RB_REMOVE invalidates it for further traversal.
	 */
	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			free(mms);
		else
			mm_free(mmalloc, mms);
	}
}

/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %zu): %s", mm->address, mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		free(mm);
	else
		mm_free(mm->mmalloc, mm);
}

/*
 * Like mm_malloc() but fatal on exhaustion; the returned memory is
 * zero-filled.
 */
void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%zu)", __func__, size);
	memset(address, 0, size);
	return (address);
}


/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	/* Reject sizes that would overflow the rounding below. */
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	/* Round the request up to a multiple of MM_MINSIZE. */
	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	/*
	 * First fit: RB_FOREACH walks the free tree in address order,
	 * so this picks the lowest-addressed range that is big enough.
	 * mms is NULL if the loop completes without a hit.
	 */
	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug: poison the handed-out memory. */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/*
	 * Carve the allocation off the front of the free range.  The
	 * remainder's address only moves forward within the gap, so its
	 * relative order in the tree is unchanged ("Does not change
	 * order in RB tree") and no re-insertion is needed.
	 */
	mms->size -= size;
	mms->address = (char *)mms->address + size;

	/* Fully consumed free range: drop its bookkeeping node. */
	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			free(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}

/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug: poison the released memory. */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/*
	 * Find previous entry: an open-coded in-order-predecessor walk
	 * over the RB tree (descend left then all the way right, or
	 * climb until we arrive from a right child).  prev ends up NULL
	 * when mms is the lowest-addressed free range.
	 */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%zu) > %p",
		    prev->address, prev->size, address);

	/*
	 * See if we can merge backwards: if the predecessor ends
	 * exactly where this range starts, absorb mms into it.
	 */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			free(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%zu)",
		    mms->address, prev->address, prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	/* Adjacent successor: absorb it too. */
	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		free(mms);
	else
		mm_free(mm->mmalloc, mms);
}

/*
 * Copy one bookkeeping tree into newtree, allocating the new nodes from
 * mm's backing allocator.  Each old entry is validated against the old
 * master (mmold) before being copied.  Used for both the free and the
 * allocated tree by mm_share_sync() below.
 */
static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}

/*
 * Rebuild the bookkeeping for *pmm inside a freshly created backing
 * master, then destroy the old backing master.  On return *pmm and
 * *pmmalloc point at the new masters.  NOTE(review): presumably this
 * moves the allocator metadata into memory shared with the privsep
 * child; confirm against the monitor code that calls it.
 */
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	/*
	 * Stash the old tree roots, then rebuild both trees with nodes
	 * allocated from the new backing master.
	 */
	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}

/*
 * Fatal unless [address, address+size) lies entirely within mm's mapped
 * region.  The end < address comparison catches pointer wrap-around
 * from an oversized size.
 */
void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > MM_ADDRESS_END(mm))
		fatal("mm_memvalid: address too large: %p", address);
}