1/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
4 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
5 * Distributed under the terms of the MIT License.
6 */
7#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
8#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
9
10
11#include <atomic>
12
13#include <KernelExport.h>
14
15#include <lock.h>
16#include <vm/vm_types.h>
17
18#include "paging/64bit/paging.h"
19#include "paging/X86PagingMethod.h"
20#include "paging/X86PagingStructures.h"
21
22
23class TranslationMapPhysicalPageMapper;
24class X86PhysicalPageMapper;
25struct vm_page_reservation;
26
27
// 64-bit (long mode) paging method. Manages the kernel's top-level page
// table -- a PML4, or a PML5 when 5-level paging is active (hence the
// generic "PMLTop" naming) -- and provides static helpers for walking and
// atomically modifying page-table entries.
class X86PagingMethod64Bit final : public X86PagingMethod {
public:
								// la57: presumably selects 5-level (LA57)
								// paging -- confirm against the .cpp
								X86PagingMethod64Bit(bool la57);
	virtual						~X86PagingMethod64Bit();

	// Initialization from the boot loader's kernel_args; InitPostArea()
	// runs once kernel areas can be created.
	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	// Creates a translation map for a kernel or userland address space.
	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);

	// Maps a page during early boot; get_free_page supplies physical pages
	// for any intermediate page tables that have to be created on the way.
	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	// The kernel's top-level page table (PML4/PML5), virtual and physical.
	inline	uint64*				KernelVirtualPMLTop() const
									{ return fKernelVirtualPMLTop; }
	inline	phys_addr_t			KernelPhysicalPMLTop() const
									{ return fKernelPhysicalPMLTop; }

	// The globally active paging method, downcast to this type.
	static	X86PagingMethod64Bit* Method();

	// Page-table walkers: given the top-level table's virtual address,
	// return the page directory / directory entry / page table / table
	// entry covering virtualAddress. When allocateTables is set, missing
	// intermediate tables are apparently allocated (using the reservation)
	// and mapCount updated -- see the implementation for the exact
	// contract.
	static	uint64*				PageDirectoryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageDirectoryEntryForAddress(
									uint64* virtualPML4, addr_t virtualAddress,
									bool isKernel, bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableEntryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);

	// Composes and installs a page-table entry for the given physical
	// address and attributes (implemented in the .cpp).
	static	void				PutPageTableEntryInTable(
									uint64* entry, phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);
	// Atomic accessors for 64-bit table entries; implemented inline below
	// via std::atomic<uint64_t> views of the raw entry slots (see the
	// static_assert following this class).
	static	void				SetTableEntry(uint64_t* entry,
									uint64_t newEntry);
	static	uint64_t			SetTableEntryFlags(uint64_t* entryPointer,
									uint64_t flags);
	static	uint64				TestAndSetTableEntry(uint64* entry,
									uint64 newEntry, uint64 oldEntry);
	static	uint64_t			ClearTableEntry(uint64_t* entryPointer);
	static	uint64_t			ClearTableEntryFlags(uint64_t* entryPointer,
									uint64_t flags);

	// Translates a B_MTR_* memory type into PTE cache-control flags.
	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
	// Per-CPU hook; the name suggests it enables the NX (execution
	// disable) feature -- confirm against the .cpp.
	static	void				_EnableExecutionDisable(void* dummy, int cpu);

			phys_addr_t			fKernelPhysicalPMLTop;
			uint64*				fKernelVirtualPMLTop;

			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;

	// Whether 5-level paging is in use; a process-wide property, hence
	// static.
	static	bool				la57;
};
114
115
// The inline accessors below reinterpret raw uint64_t page-table slots as
// std::atomic<uint64_t>. That is only sound if the atomic wrapper adds no
// state (e.g. no embedded lock) on top of the underlying integer.
static_assert(sizeof(std::atomic<uint64_t>) == sizeof(uint64_t),
	"Non-trivial representation of atomic uint64_t.");
118
119
120/*static*/ inline X86PagingMethod64Bit*
121X86PagingMethod64Bit::Method()
122{
123	return static_cast<X86PagingMethod64Bit*>(gX86PagingMethod);
124}
125
126
127/*static*/ inline void
128X86PagingMethod64Bit::SetTableEntry(uint64_t* entryPointer, uint64_t newEntry)
129{
130	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
131	entry.store(newEntry, std::memory_order_relaxed);
132}
133
134
135/*static*/ inline uint64_t
136X86PagingMethod64Bit::SetTableEntryFlags(uint64_t* entryPointer, uint64_t flags)
137{
138	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
139	return entry.fetch_or(flags);
140}
141
142
143/*static*/ inline uint64
144X86PagingMethod64Bit::TestAndSetTableEntry(uint64* entry, uint64 newEntry, uint64 oldEntry)
145{
146	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
147}
148
149
150/*static*/ inline uint64_t
151X86PagingMethod64Bit::ClearTableEntry(uint64_t* entryPointer)
152{
153	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
154	return entry.exchange(0);
155}
156
157
158/*static*/ inline uint64_t
159X86PagingMethod64Bit::ClearTableEntryFlags(uint64_t* entryPointer,
160	uint64_t flags)
161{
162	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
163	return entry.fetch_and(~flags);
164}
165
166
167/*static*/ inline uint64
168X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
169{
170	// ATM we only handle the uncacheable and write-through type explicitly. For
171	// all other types we rely on the MTRRs to be set up correctly. Since we set
172	// the default memory type to write-back and since the uncacheable type in
173	// the PTE overrides any MTRR attribute (though, as per the specs, that is
174	// not recommended for performance reasons), this reduces the work we
175	// actually *have* to do with the MTRRs to setting the remaining types
176	// (usually only write-combining for the frame buffer).
177	switch (memoryType) {
178		case B_MTR_UC:
179			return X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH;
180
181		case B_MTR_WC:
182			// X86_PTE_WRITE_THROUGH would be closer, but the combination with
183			// MTRR WC is "implementation defined" for Pentium Pro/II.
184			return 0;
185
186		case B_MTR_WT:
187			return X86_64_PTE_WRITE_THROUGH;
188
189		case B_MTR_WP:
190		case B_MTR_WB:
191		default:
192			return 0;
193	}
194}
195
196
197#endif	// KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
198