/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H


#include <debug.h>
#include <kernel.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;
struct ObjectCache;


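// The type of backing store a cache represents. A sketch of the intended
// meanings (an interpretation; the authoritative semantics live in the
// corresponding VMCache subclasses): CACHE_TYPE_RAM for anonymous memory
// (swappable or not), CACHE_TYPE_VNODE for file-backed memory,
// CACHE_TYPE_DEVICE for physical device memory, and CACHE_TYPE_NULL for
// caches without any backing store.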
enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};

enum {
	PAGE_EVENT_NOT_BUSY	= 0x01		// page not busy anymore
};


extern ObjectCache* gCacheRefObjectCache;
extern ObjectCache* gAnonymousCacheObjectCache;
extern ObjectCache* gAnonymousNoSwapCacheObjectCache;
extern ObjectCache* gVnodeCacheObjectCache;
extern ObjectCache* gDeviceCacheObjectCache;
extern ObjectCache* gNullCacheObjectCache;


struct VMCachePagesTreeDefinition {
	typedef page_num_t KeyType;
	typedef vm_page NodeType;

	static page_num_t GetKey(const NodeType* node)
	{
		return node->cache_offset;
	}

	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
	{
		return &node->cache_link;
	}

	static int Compare(page_num_t key, const NodeType* node)
	{
		return key == node->cache_offset ? 0
			: (key < node->cache_offset ? -1 : 1);
	}

	static NodeType** GetListLink(NodeType* node)
	{
		return &node->cache_next;
	}
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;
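
// A usage sketch of the pages tree (hypothetical, for illustration only),
// keyed by a page's cache_offset as defined above:
//
//	VMCachePagesTree pages;
//	pages.Insert(page);						// indexed by page->cache_offset
//	vm_page* found = pages.Lookup(offset);	// splays the found node to the root
//	pages.Remove(page);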


struct VMCache : public DoublyLinkedListLinkImpl<VMCache> {
public:
	typedef DoublyLinkedList<VMCache> ConsumerList;

public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType, uint32 allocationFlags);

	virtual	void				Delete();

	inline	bool				Lock();
	inline	bool				TryLock();
	inline	bool				SwitchLock(mutex* from);
	inline	bool				SwitchFromReadLock(rw_lock* from);
			void				Unlock(bool consumerLocked = false);
	inline	void				AssertLocked();

	inline	void				AcquireRefLocked();
	inline	void				AcquireRef();
	inline	void				ReleaseRefLocked();
	inline	void				ReleaseRef();
	inline	void				ReleaseRefAndUnlock(
									bool consumerLocked = false);

	inline	VMCacheRef*			CacheRef() const	{ return fCacheRef; }
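
	// A usage sketch (hypothetical caller): hold a reference while working
	// with the cache, and take the lock around any access to its pages:
	//
	//	cache->AcquireRef();
	//	cache->Lock();
	//	// ... inspect or modify the cache ...
	//	cache->ReleaseRefAndUnlock();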

			void				WaitForPageEvents(vm_page* page, uint32 events,
									bool relock);
			void				NotifyPageEvents(vm_page* page, uint32 events)
									{ if (fPageEventWaiters != NULL)
										_NotifyPageEvents(page, events); }
	inline	void				MarkPageUnbusy(vm_page* page);
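
	// A sketch of the busy-page protocol (hypothetical caller, cache locked):
	// a busy page is being worked on by another thread; to wait until it
	// becomes available, pass PAGE_EVENT_NOT_BUSY. With relock == true the
	// cache lock is reacquired before WaitForPageEvents() returns, so the
	// page should be looked up again afterwards:
	//
	//	vm_page* page = cache->LookupPage(offset);
	//	while (page != NULL && page->busy) {
	//		cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
	//		page = cache->LookupPage(offset);
	//	}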

			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);
			void				MovePage(vm_page* page, off_t offset);
			void				MovePage(vm_page* page);
			void				MoveAllPages(VMCache* fromCache);
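
	// A usage sketch (hypothetical): inserting a page at a byte offset and
	// finding it again; the cache lock must be held for both calls:
	//
	//	cache->Lock();
	//	cache->InsertPage(page, offset);
	//	ASSERT(cache->LookupPage(offset) == page);
	//	cache->Unlock();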

	inline	page_num_t			WiredPagesCount() const;
	inline	void				IncrementWiredPagesCount();
	inline	void				DecrementWiredPagesCount();

	virtual	int32				GuardSize()	{ return 0; }

			void				AddConsumer(VMCache* consumer);

			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);
			void				TransferAreas(VMCache* fromCache);
			uint32				CountWritableAreas(VMArea* ignoreArea) const;

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment,
									int priority);
	virtual	status_t			Resize(off_t newSize, int priority);
	virtual	status_t			Rebase(off_t newBase, int priority);
	virtual	status_t			Adopt(VMCache* source, off_t offset, off_t size,
									off_t newOffset);

	virtual	status_t			Discard(off_t offset, off_t size);

			status_t			FlushAndRemoveAllPages();

			void*				UserData()	{ return fUserData; }
			void				SetUserData(void* data)	{ fUserData = data; }
									// Settable by the lock owner and valid as
									// long as the lock is owned.

			// for debugging only
			int32				RefCount() const
									{ return fRefCount; }

	// backing store operations
	virtual	status_t			Commit(off_t size, int priority);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const generic_io_vec* vecs,
									size_t count, uint32 flags,
									generic_size_t* _numBytes);
	virtual	status_t			Write(off_t offset, const generic_io_vec* vecs,
									size_t count, uint32 flags,
									generic_size_t* _numBytes);
	virtual	status_t			WriteAsync(off_t offset,
									const generic_io_vec* vecs, size_t count,
									generic_size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; } // no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; } // no restriction
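
	// A subclass sketch (hypothetical, for illustration): a backing store
	// typically overrides at least Commit(), HasPage(), Read() and Write().
	// For instance, a device-like store could report every page as present
	// and reject demand-paging I/O:
	//
	//	virtual bool HasPage(off_t offset) { return true; }
	//	virtual status_t Read(off_t offset, const generic_io_vec* vecs,
	//			size_t count, uint32 flags, generic_size_t* _numBytes)
	//		{ return B_ERROR; }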

	virtual	status_t			Fault(struct VMAddressSpace* aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

	virtual	bool				DebugHasPage(off_t offset);
			vm_page*			DebugLookupPage(off_t offset);

	virtual	void				Dump(bool showPages) const;

protected:
	virtual	void				DeleteObject() = 0;

public:
			VMArea*				areas;
			ConsumerList		consumers;
				// list of caches that use this cache as a source
			VMCachePagesTree	pages;
			VMCache*			source;
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
				// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				type : 6;

#if DEBUG_CACHE_LIST
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
			struct PageEventWaiter;
			friend struct VMCacheRef;

private:
			void				_NotifyPageEvents(vm_page* page, uint32 events);

	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer();
			void				_RemoveConsumer(VMCache* consumer);

			bool				_FreePageRange(VMCachePagesTree::Iterator it,
									page_num_t* toPage);

private:
			int32				fRefCount;
			mutex				fLock;
			PageEventWaiter*	fPageEventWaiters;
			void*				fUserData;
			VMCacheRef*			fCacheRef;
			page_num_t			fWiredPagesCount;
};


#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif


class VMCacheFactory {
public:
	static	status_t		CreateAnonymousCache(VMCache*& cache,
								bool canOvercommit, int32 numPrecommittedPages,
								int32 numGuardPages, bool swappable,
								int priority);
	static	status_t		CreateVnodeCache(VMCache*& cache,
								struct vnode* vnode);
	static	status_t		CreateDeviceCache(VMCache*& cache,
								addr_t baseAddress);
	static	status_t		CreateNullCache(int priority, VMCache*& cache);
};
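
// A creation sketch (hypothetical; VM_PRIORITY_SYSTEM stands in for one of
// the kernel's VM priority constants, and the new cache is assumed to start
// out with one reference):
//
//	VMCache* cache;
//	status_t error = VMCacheFactory::CreateAnonymousCache(cache, false, 0, 0,
//		true, VM_PRIORITY_SYSTEM);
//	if (error == B_OK) {
//		// ... use the cache ...
//		cache->ReleaseRef();
//	}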


bool
VMCache::Lock()
{
	return mutex_lock(&fLock) == B_OK;
}


bool
VMCache::TryLock()
{
	return mutex_trylock(&fLock) == B_OK;
}


bool
VMCache::SwitchLock(mutex* from)
{
	return mutex_switch_lock(from, &fLock) == B_OK;
}


bool
VMCache::SwitchFromReadLock(rw_lock* from)
{
	return mutex_switch_from_read_lock(from, &fLock) == B_OK;
}


void
VMCache::AssertLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);
}


void
VMCache::AcquireRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}


void
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
{
	ReleaseRefLocked();
	Unlock(consumerLocked);
}


void
VMCache::MarkPageUnbusy(vm_page* page)
{
	ASSERT(page->busy);
	page->busy = false;
	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
}


page_num_t
VMCache::WiredPagesCount() const
{
	return fWiredPagesCount;
}


void
VMCache::IncrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount < page_count);

	fWiredPagesCount++;
}


void
VMCache::DecrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount > 0);

	fWiredPagesCount--;
}


// The following vm_page methods are implemented here, rather than in
// vm_types.h, to avoid making vm_types.h include VMCache.h.

inline void
vm_page::IncrementWiredCount()
{
	if (fWiredCount++ == 0)
		cache_ref->cache->IncrementWiredPagesCount();
}


inline void
vm_page::DecrementWiredCount()
{
	ASSERT_PRINT(fWiredCount > 0, "page: %#" B_PRIx64,
		physical_page_number * B_PAGE_SIZE);

	if (--fWiredCount == 0)
		cache_ref->cache->DecrementWiredPagesCount();
}


#ifdef __cplusplus
extern "C" {
#endif

status_t vm_cache_init(struct kernel_args* args);
void vm_cache_init_post_heap();
struct VMCache* vm_cache_acquire_locked_page_cache(struct vm_page* page,
	bool dontWait);
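
// A usage sketch (hypothetical): given a vm_page, acquire its cache with a
// reference and the cache lock held; dontWait == true is assumed to make
// the call fail instead of blocking when the cache is contended:
//
//	VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
//	if (cache != NULL) {
//		// ... work with the locked cache ...
//		cache->ReleaseRefAndUnlock();
//	}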

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_CACHE_H */