Line data Source code
1 : // Copyright (c) 2009-2010 Satoshi Nakamoto
2 : // Copyright (c) 2009-2013 The Bitcoin Core developers
3 : // Distributed under the MIT software license, see the accompanying
4 : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 :
6 : #ifndef BITCOIN_SUPPORT_PAGELOCKER_H
7 : #define BITCOIN_SUPPORT_PAGELOCKER_H
8 :
#include "support/cleanse.h"

#include <cassert>
#include <map>

#include <boost/thread/mutex.hpp>
#include <boost/thread/once.hpp>
15 :
16 : /**
17 : * Thread-safe class to keep track of locked (ie, non-swappable) memory pages.
18 : *
19 : * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
20 : * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
21 : * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
22 : *
23 : * @note By using a map from each page base address to lock count, this class is optimized for
24 : * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
25 : * something like an interval tree would be the preferred data structure.
26 : */
27 : template <class Locker>
28 : class LockedPageManagerBase
29 : {
30 : public:
31 97 : LockedPageManagerBase(size_t page_size) : page_size(page_size)
32 : {
33 : // Determine bitmask for extracting page from address
34 97 : assert(!(page_size & (page_size - 1))); // size must be power of two
35 97 : page_mask = ~(page_size - 1);
36 97 : }
37 :
38 97 : ~LockedPageManagerBase()
39 : {
40 194 : }
41 :
42 :
43 : // For all pages in affected range, increase lock count
44 27905 : void LockRange(void* p, size_t size)
45 : {
46 27905 : boost::mutex::scoped_lock lock(mutex);
47 27905 : if (!size)
48 27905 : return;
49 26905 : const size_t base_addr = reinterpret_cast<size_t>(p);
50 26905 : const size_t start_page = base_addr & page_mask;
51 26905 : const size_t end_page = (base_addr + size - 1) & page_mask;
52 55361 : for (size_t page = start_page; page <= end_page; page += page_size) {
53 56912 : Histogram::iterator it = histogram.find(page);
54 56912 : if (it == histogram.end()) // Newly locked page
55 : {
56 10839 : locker.Lock(reinterpret_cast<void*>(page), page_size);
57 32517 : histogram.insert(std::make_pair(page, 1));
58 : } else // Page was already locked; increase counter
59 : {
60 17617 : it->second += 1;
61 : }
62 : }
63 : }
64 :
65 : // For all pages in affected range, decrease lock count
66 27905 : void UnlockRange(void* p, size_t size)
67 : {
68 27905 : boost::mutex::scoped_lock lock(mutex);
69 27905 : if (!size)
70 27905 : return;
71 26905 : const size_t base_addr = reinterpret_cast<size_t>(p);
72 26905 : const size_t start_page = base_addr & page_mask;
73 26905 : const size_t end_page = (base_addr + size - 1) & page_mask;
74 55361 : for (size_t page = start_page; page <= end_page; page += page_size) {
75 56912 : Histogram::iterator it = histogram.find(page);
76 56912 : assert(it != histogram.end()); // Cannot unlock an area that was not locked
77 : // Decrease counter for page, when it is zero, the page will be unlocked
78 28456 : it->second -= 1;
79 28456 : if (it->second == 0) // Nothing on the page anymore that keeps it locked
80 : {
81 : // Unlock page and remove the count from histogram
82 10839 : locker.Unlock(reinterpret_cast<void*>(page), page_size);
83 10839 : histogram.erase(it);
84 : }
85 : }
86 : }
87 :
88 : // Get number of locked pages for diagnostics
89 4 : int GetLockedPageCount()
90 : {
91 4 : boost::mutex::scoped_lock lock(mutex);
92 12 : return histogram.size();
93 : }
94 :
95 : private:
96 : Locker locker;
97 : boost::mutex mutex;
98 : size_t page_size, page_mask;
99 : // map of page base address to lock count
100 : typedef std::map<size_t, int> Histogram;
101 : Histogram histogram;
102 : };
103 :
104 :
/**
 * OS-dependent memory page locking/unlocking.
 * Defined as policy class to make stubbing for test possible.
 * Plugged into LockedPageManagerBase as its Locker template parameter
 * (see LockedPageManager below). Callers there ignore the return values.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages.
     * addr and len must be a multiple of the system page size
     * NOTE(review): return value presumably signals success; the out-of-line
     * implementation is not visible here — confirm against the .cpp.
     */
    bool Lock(const void* addr, size_t len);
    /** Unlock memory pages.
     * addr and len must be a multiple of the system page size
     * NOTE(review): return value presumably signals success; the out-of-line
     * implementation is not visible here — confirm against the .cpp.
     */
    bool Unlock(const void* addr, size_t len);
};
121 :
/**
 * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
 * std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (i.e., see
 * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
 * Due to the unpredictable order of static initializers, we have to make sure the
 * LockedPageManager instance exists before any other STL-based objects that use
 * secure_allocator are created. So instead of having LockedPageManager also be
 * static-initialized, it is created on demand.
 */
class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker>
{
public:
    /** Return the process-wide instance, creating it on first use.
     * boost::call_once guarantees CreateInstance runs exactly once even
     * under concurrent first calls.
     */
    static LockedPageManager& Instance()
    {
        boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
        return *LockedPageManager::_instance;
    }

private:
    // Private and defined out-of-line: enforces access through Instance().
    LockedPageManager();

    static void CreateInstance()
    {
        // Using a local static instance guarantees that the object is initialized
        // when it's first needed and also deinitialized after all objects that use
        // it are done with it. I can think of one unlikely scenario where we may
        // have a static deinitialization order/problem, but the check in
        // LockedPageManagerBase's destructor helps us detect if that ever happens.
        static LockedPageManager instance;
        LockedPageManager::_instance = &instance;
    }

    // Both statics are defined in the corresponding .cpp file.
    static LockedPageManager* _instance;
    static boost::once_flag init_flag;
};
159 :
160 : //
161 : // Functions for directly locking/unlocking memory objects.
162 : // Intended for non-dynamically allocated structures.
163 : //
164 : template <typename T>
165 18076 : void LockObject(const T& t)
166 : {
167 18076 : LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
168 18076 : }
169 :
170 : template <typename T>
171 18076 : void UnlockObject(const T& t)
172 : {
173 18076 : memory_cleanse((void*)(&t), sizeof(T));
174 18076 : LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
175 18076 : }
176 :
177 : #endif // BITCOIN_SUPPORT_PAGELOCKER_H
|