/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98   cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
	const char		*fmt,
	va_list			argp,
	void			(*putc)(int, void *),
	void                    *arg,
	int			radix);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t	*IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal	= NULL;
void *_giDebugLogDataInternal	= NULL;
void *_giDebugReserved1		= NULL;
void *_giDebugReserved2		= NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t		map;
    vm_offset_t	address;
    vm_offset_t	end;
} IOMapData;

static struct {
    UInt32	count;
    UInt32	hint;
    IOMapData	maps[ kIOMaxPageableMaps ];
    lck_mtx_t *	lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock		= lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end	= gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint		= 0;
    gIOKitPageableSpace.count		= 1;

    gIOMallocContiguousEntriesLock	= lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
	if (result != KERN_SUCCESS)
		return (NULL);

	thread_deallocate(thread);

	return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


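/*
 * General purpose kernel heap allocation.
 *
 * IOMalloc/IOFree are thin wrappers around kalloc/kfree that also account
 * the allocation in the IOKit statistics. The caller must pass the original
 * allocation size back to IOFree. A minimal, purely illustrative usage
 * sketch, where "MyDriverState" is a hypothetical client structure:
 *
 *     MyDriverState * state = (MyDriverState *) IOMalloc(sizeof(*state));
 *     if (state) {
 *         bzero(state, sizeof(*state));
 *         // ... use state ...
 *         IOFree(state, sizeof(*state));
 *     }
 */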
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if ( address ) {
#if IOALLOCDEBUG
		debug_iomalloc_size += size;
#endif
		IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
		kfree(address, size);
#if IOALLOCDEBUG
		debug_iomalloc_size -= size;
#endif
		IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

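/*
 * Aligned allocation.
 *
 * Requests whose adjusted size reaches a page are satisfied directly by
 * kernel_memory_allocate with the alignment mask. Smaller requests
 * over-allocate from kalloc and round the result up to the requested
 * alignment; the adjusted size and the raw allocation address are stashed
 * in the bytes immediately preceding the returned pointer so that
 * IOFreeAligned can recover them later.
 */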
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	kr;
    vm_offset_t		address;
    vm_offset_t		allocationAddress;
    vm_size_t		adjustedSize;
    uintptr_t		alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (size > adjustedSize) {
	    address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
					size, alignMask, 0);
	if (KERN_SUCCESS != kr)
	    address = 0;

    } else {

	adjustedSize += alignMask;

	if (adjustedSize >= page_size) {

	    kr = kernel_memory_allocate(kernel_map, &allocationAddress,
					    adjustedSize, 0, 0);
	    if (KERN_SUCCESS != kr)
		allocationAddress = 0;

	} else
	    allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
			    = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
	} else
	    address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
		debug_iomalloc_size += size;
#endif
    	IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
	}

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t	allocationAddress;
    vm_size_t	adjustedSize;

    if( !address)
	return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
      	adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
				- sizeof(vm_address_t) ));

	if (adjustedSize >= page_size)
	    kmem_free( kernel_map, allocationAddress, adjustedSize);
	else
	    kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
	return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

	kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

	adjustedSize = *((mach_vm_size_t *)
			(address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
	allocationAddress = *((mach_vm_address_t *)
			(address - sizeof(mach_vm_address_t) ));
	kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

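/*
 * Allocation with physical placement restrictions.
 *
 * Contiguous requests, and requests limited to a maximum physical address,
 * are handed to kmem_alloc_contig or kernel_memory_allocate (with KMA_LOMEM
 * when the limit fits below 4GB). Unrestricted small requests fall back to
 * kalloc using the same hidden-header scheme as IOMallocAligned, which is
 * what IOKernelFreePhysical unwinds above.
 */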
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
			                mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t	kr;
    mach_vm_address_t	address;
    mach_vm_address_t	allocationAddress;
    mach_vm_size_t	adjustedSize;
    mach_vm_address_t	alignMask;

    if (size == 0)
	return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
	vm_offset_t virt;

	adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                           || (alignment > page_size);

	if (!contiguous)
	{
	    if (maxPhys <= 0xFFFFFFFF)
	    {
		maxPhys = 0;
		options |= KMA_LOMEM;
	    }
	    else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
	    {
		maxPhys = 0;
	    }
	}
	if (contiguous || maxPhys)
	{
	    kr = kmem_alloc_contig(kernel_map, &virt, size,
				   alignMask, atop(maxPhys), atop(alignMask), 0);
	}
	else
	{
	    kr = kernel_memory_allocate(kernel_map, &virt,
					size, alignMask, options);
	}
	if (KERN_SUCCESS == kr)
	    address = virt;
	else
	    address = 0;
    }
    else
    {
	adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
	} else
	    address = 0;
    }

    if (address) {
	IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
	debug_iomalloc_size += size;
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

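/*
 * IOMallocContiguous bookkeeping.
 *
 * When the caller asks for the physical address, the memory is obtained
 * through an IOBufferMemoryDescriptor and remembered on the
 * gIOMallocContiguousEntries list so that IOFreeContiguous can locate and
 * release the descriptor by virtual address. Otherwise the request goes
 * straight to IOKernelAllocateWithPhysicalRestrict.
 */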
struct _IOMallocContiguousEntry
{
    mach_vm_address_t	       virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t	       link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
			   IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t	address = 0;

    if (size == 0)
	return 0;
    if (alignment == 0)
	alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
	address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
	IOBufferMemoryDescriptor * bmd;
	mach_vm_address_t          physicalMask;
	vm_offset_t		   alignMask;

	alignMask = alignment - 1;
	physicalMask = (0xFFFFFFFF ^ alignMask);

	bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
		kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
	if (!bmd)
	    break;

	_IOMallocContiguousEntry *
	entry = IONew(_IOMallocContiguousEntry, 1);
	if (!entry)
	{
	    bmd->release();
	    break;
	}
	entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
	entry->md          = bmd;
	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_enter( &gIOMallocContiguousEntries, entry,
		    _IOMallocContiguousEntry *, link );
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	address          = (mach_vm_address_t) entry->virtualAddr;
	*physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
	return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
		    _IOMallocContiguousEntry *, link )
    {
	if( entry->virtualAddr == address ) {
	    md   = entry->md;
	    queue_remove( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
	    break;
	}
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
	md->release();
	IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
	IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

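/*
 * Pageable space management.
 *
 * IOIteratePageableMaps walks the existing pageable submaps, starting at
 * the last successful one (the hint), and invokes the callback on each
 * until it succeeds. If every map is exhausted, a new submap is created
 * with kmem_suballoc and added to gIOKitPageableSpace, up to
 * kIOMaxPageableMaps entries.
 */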
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t	kr = kIOReturnNotReady;
    vm_size_t		segSize;
    UInt32		attempts;
    UInt32		index;
    vm_offset_t		min;
    vm_map_t		map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map	= map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end	= min + segSize;
        gIOKitPageableSpace.hint		= index;
        gIOKitPageableSpace.count		= index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t	 size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t	         kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	       kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t	map = 0;
    UInt32	index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
}

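/*
 * Pageable allocations close to a page or larger come straight from the
 * pageable submaps; smaller ones are carved out of whole pageable pages by
 * the iopa chunk allocator below, which keeps many small allocations packed
 * into a single page.
 */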
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*kIOPageAllocChunkBytes))
	addr = IOMallocPageablePages(size, alignment);
    else
	addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
       debug_iomallocpageable_size += size;
#endif
       IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    debug_iomallocpageable_size -= size;
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*kIOPageAllocChunkBytes))
    {
	address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
	size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if 0
#undef assert
#define assert(ex)  \
	((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
#endif

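/*
 * iopa_t: simple sub-page allocator.
 *
 * Each backing page is split into kIOPageAllocChunkBytes-sized chunks that
 * are tracked by a 64-bit availability bitmap in an iopa_page_t header,
 * which itself lives in the last chunk of the page (hence the compile-time
 * size assertion below). Pages with free chunks are kept on the allocator's
 * list under its lock.
 */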
typedef char iopa_page_t_assert[(sizeof(iopa_page_t) <= kIOPageAllocChunkBytes) ? 1 : -1];

extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

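/*
 * iopa_allocinpage: find `count` contiguous free chunks in one page.
 *
 * The folding loop below ANDs avail with shifted copies of itself, leaving
 * a bit set only where a run of `count` free chunks begins; the align mask
 * then keeps only suitably aligned starting positions. Bit 63 corresponds
 * to chunk 0, so __builtin_clzll gives the chunk index of the first fit.
 * A page whose last free chunk is taken is removed from the free list.
 */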
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
    	s = n >> 1;
    	avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
	n = __builtin_clzll(avail);
	pa->avail &= ~((-1ULL << (64 - count)) >> n);
	if (!pa->avail && pa->link.next)
	{
	    remque(&pa->link);
	    pa->link.next = 0;
	}
	return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}

uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
	0xFFFFFFFFFFFFFFFF,
	0xAAAAAAAAAAAAAAAA,
	0x8888888888888888,
	0x8080808080808080,
	0x8000800080008000,
	0x8000000080000000,
	0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    pa = (typeof(pa)) queue_first(&a->list);
    while (!queue_end(&a->list, &pa->link))
    {
	addr = iopa_allocinpage(pa, count, align);
	if (addr)
	{
	    a->bytecount += bytes;
	    break;
	}
	pa = (typeof(pa)) queue_next(&pa->link);
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
	addr = alloc(a);
	if (addr)
	{
	    pa = (typeof(pa)) (addr + page_size - kIOPageAllocChunkBytes);
	    pa->signature = kIOPageAllocSignature;
	    pa->avail     = -2ULL;

	    addr = iopa_allocinpage(pa, count, align);
	    IOLockLock(a->lock);
	    if (pa->avail) enqueue_head(&a->list, &pa->link);
	    a->pagecount++;
	    if (addr) a->bytecount += bytes;
	    IOLockUnlock(a->lock);
	}
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}

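/*
 * iopa_free: return chunks to their page's bitmap.
 *
 * The page header is found from the address alone, since it occupies the
 * last chunk of the page. A page that regains a free chunk goes back on the
 * list; once every chunk except the header is free again (avail == -2ULL)
 * the page is unlinked and its base address is returned so the caller can
 * release the whole page, otherwise 0 is returned.
 */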
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    chunk /= kIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
	assert(!pa->link.next);
	enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
	a->pagecount--;
	// page to free
	pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

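/*
 * IOSetProcessorCacheMode remaps an already-mapped, page-aligned kernel
 * range one page at a time with the requested cache mode; only the
 * kernel_task map is supported.
 */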
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
				  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn	ret = kIOReturnSuccess;
    ppnum_t	pagenum;

    if( task != kernel_task)
	return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
	return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

	// Get the physical page number
	pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
	if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
	    ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
	} else
	    ret = kIOReturnVMError;

	address += page_size;
	length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
				  IOByteCount length )
{
    if( task != kernel_task)
	return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

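/*
 * IOLog/IOLogv format the message twice: once into the system log while
 * holding bsd_log_lock, and once directly to the console.
 */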
static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();
    logwakeup();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
    va_end(ap2);
}

#if !__LP64__
void IOPanic(const char *reason)
{
	panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
	for( ; regValueArray->name; regValueArray++) {
		if(regValueArray->value == value)
			return(regValueArray->name);
	}
	snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
	return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
	const IONamedValue *regValueArray,
	int *value)
{
	for( ; regValueArray->name; regValueArray++) {
		if(!strcmp(regValueArray->name, string)) {
			*value = regValueArray->value;
			return kIOReturnSuccess;
		}
	}
	return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

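/*
 * IOSizeToAlignment returns the bit index of the most significant set bit
 * of size (0 when size is 0); IOAlignmentToSize is the inverse, returning
 * 1 << align.
 */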
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
	if (size & 0x80000000)
	    return (IOAlignment)(intsize - shift);
	size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
	size <<= 1;
    }
    return size;
}

} /* extern "C" */