mapped_region.hpp

//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MAPPED_REGION_HPP
#define BOOST_INTERPROCESS_MAPPED_REGION_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/move/utility_core.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/os_file_functions.hpp>
#include <string>
#include <boost/cstdint.hpp>
#include <boost/assert.hpp>
#include <boost/move/adl_move_swap.hpp>

//Some Unixes use caddr_t instead of void * in madvise
//            SunOS              Tru64                    HP-UX               AIX
#if defined(sun) || defined(__sun) || defined(__osf__) || defined(__osf) || defined(_hpux) || defined(hpux) || defined(_AIX)
#define BOOST_INTERPROCESS_MADVISE_USES_CADDR_T
#include <sys/types.h>
#endif

//A lot of UNIXes have destructive semantics for MADV_DONTNEED, so
//we only allow it on systems where it is known to be non-destructive.
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
#define BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS
#endif

#if defined (BOOST_INTERPROCESS_WINDOWS)
#  include <boost/interprocess/detail/win32_api.hpp>
#  include <boost/interprocess/sync/windows/sync_utils.hpp>
#else
#  ifdef BOOST_HAS_UNISTD_H
#    include <fcntl.h>
#    include <sys/mman.h>     //mmap
#    include <unistd.h>
#    include <sys/stat.h>
#    include <sys/types.h>
#    if defined(BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS)
#      include <sys/shm.h>    //System V shared memory...
#    endif
#    include <boost/assert.hpp>
#  else
#    error Unknown platform
#  endif
#endif   //#if defined (BOOST_INTERPROCESS_WINDOWS)

//!\file
//!Describes mapped region class

namespace boost {
namespace interprocess {

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

//Solaris declares madvise only in some configurations but defines MADV_XXX, a bit confusing.
//Predeclare it here to avoid any compilation error
#if (defined(sun) || defined(__sun)) && defined(MADV_NORMAL)
extern "C" int madvise(caddr_t, size_t, int);
#endif

namespace ipcdetail{ class interprocess_tester; }
namespace ipcdetail{ class raw_mapped_region_creator; }

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

//!The mapped_region class represents a portion or region created from a
//!memory_mappable object.
//!
//!The OS can map a region bigger than the requested one, as the region must
//!be a multiple of the page size, but mapped_region will always refer to
//!the region specified by the user.
class mapped_region
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   BOOST_MOVABLE_BUT_NOT_COPYABLE(mapped_region)
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:

   //!Creates a mapped region of the memory mappable object "mapping", starting
   //!at offset "offset" and with size "size". The mapping can be opened for
   //!read-only, read-write or copy-on-write access.
   //!
   //!If an address is specified, both the offset and the address must be
   //!multiples of the page size.
   //!
   //!The map is created using "default_map_options". This flag is OS
   //!dependent and it should not be changed unless the user needs to
   //!specify special options.
   //!
   //!In Windows systems "map_options" is a DWORD value passed as
   //!"dwDesiredAccess" to "MapViewOfFileEx". If "default_map_options" is passed
   //!it's initialized to zero. "map_options" is ORed with FILE_MAP_[COPY|READ|WRITE].
   //!
   //!In UNIX systems and POSIX mappings "map_options" is an int value passed as "flags"
   //!to "mmap". If "default_map_options" is specified it's initialized to MAP_NOSYNC
   //!if that option exists and to zero otherwise. "map_options" is ORed with MAP_PRIVATE or MAP_SHARED.
   //!
   //!In UNIX systems and XSI mappings "map_options" is an int value passed as "shmflg"
   //!to "shmat". If "default_map_options" is specified it's initialized to zero.
   //!"map_options" is ORed with SHM_RDONLY if needed.
   //!
   //!The OS could allocate more pages than size/page_size(), but get_address()
   //!will always return the address passed in this function (if not null) and
   //!get_size() will return the specified size.
   template<class MemoryMappable>
   mapped_region(const MemoryMappable& mapping
                ,mode_t mode
                ,offset_t offset = 0
                ,std::size_t size = 0
                ,const void *address = 0
                ,map_options_t map_options = default_map_options);
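
   //Illustrative usage sketch (not part of the original documentation): mapping a
   //shared_memory_object into the calling process. The object name "MySharedMemory"
   //and the 1000-byte size are assumptions made only for this example.
   //
   //   #include <boost/interprocess/shared_memory_object.hpp>
   //   #include <boost/interprocess/mapped_region.hpp>
   //   #include <cstring>
   //
   //   int main()
   //   {
   //      using namespace boost::interprocess;
   //      //Create a shared memory object and give it a size
   //      shared_memory_object shm(create_only, "MySharedMemory", read_write);
   //      shm.truncate(1000);
   //      //Map the whole object in this process' address space
   //      mapped_region region(shm, read_write);
   //      //Write to the mapped address range
   //      std::memset(region.get_address(), 0, region.get_size());
   //      shared_memory_object::remove("MySharedMemory");
   //      return 0;
   //   }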

   //!Default constructor. Address will be 0 (nullptr).
   //!Size will be 0.
   //!Does not throw
   mapped_region();

   //!Move constructor. *this will be constructed taking ownership of "other"'s
   //!region and "other" will be left in a default-constructed state.
   mapped_region(BOOST_RV_REF(mapped_region) other)
      #if defined (BOOST_INTERPROCESS_WINDOWS)
      : m_base(0), m_size(0)
      , m_page_offset(0)
      , m_mode(read_only)
      , m_file_or_mapping_hnd(ipcdetail::invalid_file())
      #else
      : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
      #endif
   {  this->swap(other);   }

   //!Destroys the mapped region.
   //!Does not throw
   ~mapped_region();

   //!Move assignment. If *this owns a memory mapped region, it will be
   //!destroyed and it will take ownership of "other"'s memory mapped region.
   mapped_region &operator=(BOOST_RV_REF(mapped_region) other)
   {
      mapped_region tmp(boost::move(other));
      this->swap(tmp);
      return *this;
   }
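
   //Illustrative ownership-transfer sketch (not part of the original documentation;
   //"region" is a hypothetical, already-constructed mapped_region):
   //
   //   mapped_region other(boost::move(region)); //"region" is left default-constructed
   //   region = boost::move(other);              //ownership moves back into "region"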

   //!Swaps the mapped_region with another
   //!mapped region
   void swap(mapped_region &other);

   //!Returns the size of the mapping. Never throws.
   std::size_t get_size() const;

   //!Returns the base address of the mapping.
   //!Never throws.
   void* get_address() const;

   //!Returns the mode of the mapping used to construct the mapped region.
   //!Never throws.
   mode_t get_mode() const;

   //!Flushes to the disk a byte range within the mapped memory.
   //!If 'async' is true, the function will return before the flushing operation is completed.
   //!If 'async' is false, the function will return once the data has been written into the
   //!underlying device (i.e., in mapped files the OS-cached information is written to disk).
   //!Never throws. Returns false if the operation could not be performed.
   bool flush(std::size_t mapping_offset = 0, std::size_t numbytes = 0, bool async = true);
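
   //Illustrative sketch (assumes "region" is a hypothetical mapped_region over a
   //mapped file opened with write access):
   //
   //   region.flush();            //asynchronous flush of the whole region
   //   region.flush(0, 0, false); //synchronous flush: returns once data reaches the device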

   //!Shrinks the current mapped region. If after shrinking there is no longer need for a
   //!previously mapped memory page, accessing that page can trigger a segmentation fault.
   //!Depending on the OS, this operation might fail (XSI shared memory), it can decommit
   //!storage and free a portion of the virtual address space (e.g. POSIX), or it can release
   //!some physical memory without freeing any virtual address space (Windows).
   //!Returns true on success. Never throws.
   bool shrink_by(std::size_t bytes, bool from_back = true);
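
   //Illustrative sketch (assumes a hypothetical "region" spanning at least two pages):
   //
   //   const std::size_t pg = mapped_region::get_page_size();
   //   region.shrink_by(pg);        //drop pg bytes from the back; get_size() decreases
   //   region.shrink_by(pg, false); //drop pg bytes from the front; get_address() advances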

   //!This enum specifies region usage behaviors that an application can specify
   //!to the mapped region implementation.
   enum advice_types{
      //!Specifies that the application has no advice to give on its behavior with respect to
      //!the region. It is the default characteristic if no advice is given for a range of memory.
      advice_normal,
      //!Specifies that the application expects to access the region sequentially from
      //!lower addresses to higher addresses. The implementation can lower the priority of
      //!preceding pages within the region once a page has been accessed.
      advice_sequential,
      //!Specifies that the application expects to access the region in a random order,
      //!and prefetching is likely not advantageous.
      advice_random,
      //!Specifies that the application expects to access the region in the near future.
      //!The implementation can prefetch pages of the region.
      advice_willneed,
      //!Specifies that the application expects that it will not access the region in the near future.
      //!The implementation can unload pages within the range to save system resources.
      advice_dontneed
   };

   //!Advises the implementation on the expected behavior of the application with respect to the data
   //!in the region. The implementation may use this information to optimize handling of the region data.
   //!This function has no effect on the semantics of access to memory in the region, although it may affect
   //!the performance of access.
   //!If the advice type is not known to the implementation, the function returns false; true otherwise.
   bool advise(advice_types advise);
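
   //Illustrative sketch (assumes a hypothetical mapped_region "region"):
   //
   //   //Hint that the region will be read from start to end; returns false where no
   //   //madvise/posix_madvise support exists (e.g. Windows).
   //   region.advise(mapped_region::advice_sequential);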

   //!Returns the size of the page. This size is the minimum memory that
   //!will be used by the system when mapping a memory mappable source and
   //!it restricts the addresses and offsets that can be mapped.
   static std::size_t get_page_size();
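
   //Illustrative sketch: when requesting a fixed address, both the offset and the address
   //must be multiples of this value ("requested_offset" is a hypothetical offset_t variable):
   //
   //   const std::size_t pg = mapped_region::get_page_size();
   //   const offset_t aligned_offset = (requested_offset / offset_t(pg)) * offset_t(pg);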

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //!Closes a previously opened memory mapping. Never throws
   void priv_close();

   void* priv_map_address() const;
   std::size_t priv_map_size() const;
   bool priv_flush_param_check(std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const;
   bool priv_shrink_param_check(std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes);
   static void priv_size_from_mapping_size
      (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size);
   static offset_t priv_page_offset_addr_fixup(offset_t page_offset, const void *&addr);

   template<int dummy>
   struct page_size_holder
   {
      static const std::size_t PageSize;
      static std::size_t get_page_size();
   };

   void*          m_base;
   std::size_t    m_size;
   std::size_t    m_page_offset;
   mode_t         m_mode;
   #if defined(BOOST_INTERPROCESS_WINDOWS)
   file_handle_t  m_file_or_mapping_hnd;
   #else
   bool           m_is_xsi;
   #endif

   friend class ipcdetail::interprocess_tester;
   friend class ipcdetail::raw_mapped_region_creator;
   void dont_close_on_destruction();
   #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
   template<int Dummy>
   static void destroy_syncs_in_range(const void *addr, std::size_t size);
   #endif
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
};

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

inline void swap(mapped_region &x, mapped_region &y)
{  x.swap(y);  }

inline mapped_region::~mapped_region()
{  this->priv_close(); }

inline std::size_t mapped_region::get_size() const
{  return m_size; }

inline mode_t mapped_region::get_mode() const
{  return m_mode; }

inline void* mapped_region::get_address() const
{  return m_base; }

inline void* mapped_region::priv_map_address() const
{  return static_cast<char*>(m_base) - m_page_offset; }

inline std::size_t mapped_region::priv_map_size() const
{  return m_size + m_page_offset; }

inline bool mapped_region::priv_flush_param_check
   (std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const
{
   //Check some errors
   if(m_base == 0)
      return false;

   if(mapping_offset >= m_size || (mapping_offset + numbytes) > m_size){
      return false;
   }

   //Update flush size if the user does not provide it
   if(numbytes == 0){
      numbytes = m_size - mapping_offset;
   }
   addr = (char*)this->priv_map_address() + mapping_offset;
   numbytes += m_page_offset;
   return true;
}

inline bool mapped_region::priv_shrink_param_check
   (std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes)
{
   //Check some errors
   if(m_base == 0 || bytes > m_size){
      return false;
   }
   else if(bytes == m_size){
      this->priv_close();
      return true;
   }
   else{
      const std::size_t page_size = mapped_region::get_page_size();
      if(from_back){
         const std::size_t new_pages = (m_size + m_page_offset - bytes - 1)/page_size + 1;
         shrink_page_start = static_cast<char*>(this->priv_map_address()) + new_pages*page_size;
         shrink_page_bytes = m_page_offset + m_size - new_pages*page_size;
         m_size -= bytes;
      }
      else{
         shrink_page_start = this->priv_map_address();
         m_page_offset += bytes;
         shrink_page_bytes = (m_page_offset/page_size)*page_size;
         m_page_offset = m_page_offset % page_size;
         m_size -= bytes;
         m_base = static_cast<char *>(m_base) + bytes;
         BOOST_ASSERT(shrink_page_bytes%page_size == 0);
      }
      return true;
   }
}

inline void mapped_region::priv_size_from_mapping_size
   (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size)
{
   //Check if mapping size fits in the user address space,
   //as offset_t is the maximum file size and it's signed.
   if(mapping_size < offset ||
      boost::uintmax_t(mapping_size - (offset - page_offset)) >
         boost::uintmax_t(std::size_t(-1))){
      error_info err(size_error);
      throw interprocess_exception(err);
   }
   size = static_cast<std::size_t>(mapping_size - (offset - page_offset));
}

inline offset_t mapped_region::priv_page_offset_addr_fixup(offset_t offset, const void *&address)
{
   //We can't map just any offset, so we have to obtain the system's
   //memory granularity
   const std::size_t page_size = mapped_region::get_page_size();

   //We calculate the difference between the demanded offset and the valid offset
   //(always less than a page in std::size_t, thus representable by std::size_t)
   const std::size_t page_offset =
      static_cast<std::size_t>(offset - (offset / page_size) * page_size);
   //Update the mapping address
   if(address){
      address = static_cast<const char*>(address) - page_offset;
   }
   return page_offset;
}
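
//Worked example of the fixup above (illustrative, assuming a 4096-byte page/granularity):
//for offset == 5000 the returned page_offset is 5000 - 4096 == 904, the underlying view is
//mapped at offset 4096 with length page_offset + size, and get_address() returns the mapped
//base plus 904 bytes.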

#if defined (BOOST_INTERPROCESS_WINDOWS)

inline mapped_region::mapped_region()
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(read_only)
   ,  m_file_or_mapping_hnd(ipcdetail::invalid_file())
{}

template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{
   winapi::system_info info;
   winapi::get_system_info(&info);
   return std::size_t(info.dwAllocationGranularity);
}

template<class MemoryMappable>
inline mapped_region::mapped_region
   (const MemoryMappable &mapping
   ,mode_t mode
   ,offset_t offset
   ,std::size_t size
   ,const void *address
   ,map_options_t map_options)
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(mode)
   ,  m_file_or_mapping_hnd(ipcdetail::invalid_file())
{
   mapping_handle_t mhandle = mapping.get_mapping_handle();
   {
      file_handle_t native_mapping_handle = 0;

      //Set accesses
      //For "create_file_mapping"
      unsigned long protection = 0;
      //For "mapviewoffile"
      unsigned long map_access = map_options == default_map_options ? 0 : map_options;

      switch(mode)
      {
         case read_only:
         case read_private:
            protection |= winapi::page_readonly;
            map_access |= winapi::file_map_read;
         break;
         case read_write:
            protection |= winapi::page_readwrite;
            map_access |= winapi::file_map_write;
         break;
         case copy_on_write:
            protection |= winapi::page_writecopy;
            map_access |= winapi::file_map_copy;
         break;
         default:
            {
               error_info err(mode_error);
               throw interprocess_exception(err);
            }
         break;
      }

      //For file mapping (including emulated shared memory through temporary files),
      //the device is a file handle, so we need to obtain the file's size and call
      //create_file_mapping to obtain the mapping handle.
      //For files we don't need the file mapping after mapping the memory, as the file
      //is there, so we schedule the mapping handle to be closed.
      void * handle_to_close = winapi::invalid_handle_value;
      if(!mhandle.is_shm){
         //Create mapping handle
         native_mapping_handle = winapi::create_file_mapping
            ( ipcdetail::file_handle_from_mapping_handle(mapping.get_mapping_handle())
            , protection, 0, 0, 0);

         //Check if all is correct
         if(!native_mapping_handle){
            error_info err = winapi::get_last_error();
            throw interprocess_exception(err);
         }
         handle_to_close = native_mapping_handle;
      }
      else{
         //For windows_shared_memory the device handle is already a mapping handle
         //and we need to maintain it
         native_mapping_handle = mhandle.handle;
      }
      //RAII handle close on scope exit
      const winapi::handle_closer close_handle(handle_to_close);
      (void)close_handle;

      const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

      //Obtain mapping size if user provides 0 size
      if(size == 0){
         offset_t mapping_size;
         if(!winapi::get_file_mapping_size(native_mapping_handle, mapping_size)){
            error_info err = winapi::get_last_error();
            throw interprocess_exception(err);
         }
         //This can throw
         priv_size_from_mapping_size(mapping_size, offset, page_offset, size);
      }

      //Map with new offsets and size
      void *base = winapi::map_view_of_file_ex
         (native_mapping_handle,
          map_access,
          offset - page_offset,
          static_cast<std::size_t>(page_offset + size),
          const_cast<void*>(address));
      //Check error
      if(!base){
         error_info err = winapi::get_last_error();
         throw interprocess_exception(err);
      }

      //Calculate new base for the user
      m_base = static_cast<char*>(base) + page_offset;
      m_page_offset = page_offset;
      m_size = size;
   }
   //Windows shared memory needs the duplication of the handle if we want to
   //make mapped_region independent from the mappable device
   //
   //For mapped files, we duplicate the file handle to be able to FlushFileBuffers
   if(!winapi::duplicate_current_process_handle(mhandle.handle, &m_file_or_mapping_hnd)){
      error_info err = winapi::get_last_error();
      this->priv_close();
      throw interprocess_exception(err);
   }
}

inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
{
   void *addr;
   if(!this->priv_flush_param_check(mapping_offset, addr, numbytes)){
      return false;
   }
   //Flush it all
   if(!winapi::flush_view_of_file(addr, numbytes)){
      return false;
   }
   //m_file_or_mapping_hnd can be a file handle or a mapping handle,
   //so flushing file buffers only makes sense for files...
   else if(!async && m_file_or_mapping_hnd != winapi::invalid_handle_value &&
           winapi::get_file_type(m_file_or_mapping_hnd) == winapi::file_type_disk){
      return winapi::flush_file_buffers(m_file_or_mapping_hnd);
   }
   return true;
}

inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(!this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In Windows, we can't decommit the storage or release the virtual address space,
      //the best we can do is try to remove some memory from the process working set.
      //With a bit of luck we can free some physical memory.
      unsigned long old_protect_ignored;
      bool b_ret = winapi::virtual_unlock(shrink_page_start, shrink_page_bytes)
                   || (winapi::get_last_error() == winapi::error_not_locked);
      (void)old_protect_ignored;
      //Change page protection to forbid any further access
      b_ret = b_ret && winapi::virtual_protect
         (shrink_page_start, shrink_page_bytes, winapi::page_noaccess, old_protect_ignored);
      return b_ret;
   }
   else{
      return true;
   }
}

inline bool mapped_region::advise(advice_types)
{
   //Windows has no madvise/posix_madvise equivalent
   return false;
}

inline void mapped_region::priv_close()
{
   if(m_base){
      void *addr = this->priv_map_address();
      #if !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
      mapped_region::destroy_syncs_in_range<0>(addr, m_size);
      #endif
      winapi::unmap_view_of_file(addr);
      m_base = 0;
   }
   if(m_file_or_mapping_hnd != ipcdetail::invalid_file()){
      winapi::close_handle(m_file_or_mapping_hnd);
      m_file_or_mapping_hnd = ipcdetail::invalid_file();
   }
}

inline void mapped_region::dont_close_on_destruction()
{}

#else    //#if defined (BOOST_INTERPROCESS_WINDOWS)

inline mapped_region::mapped_region()
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
{}

template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{  return std::size_t(sysconf(_SC_PAGESIZE)); }

template<class MemoryMappable>
inline mapped_region::mapped_region
   ( const MemoryMappable &mapping
   , mode_t mode
   , offset_t offset
   , std::size_t size
   , const void *address
   , map_options_t map_options)
   : m_base(0), m_size(0), m_page_offset(0), m_mode(mode), m_is_xsi(false)
{
   mapping_handle_t map_hnd = mapping.get_mapping_handle();

   //Some systems don't support XSI shared memory
   #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
   if(map_hnd.is_xsi){
      //Get the size
      ::shmid_ds xsi_ds;
      int ret = ::shmctl(map_hnd.handle, IPC_STAT, &xsi_ds);
      if(ret == -1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Compare sizes
      if(size == 0){
         size = (std::size_t)xsi_ds.shm_segsz;
      }
      else if(size != (std::size_t)xsi_ds.shm_segsz){
         error_info err(size_error);
         throw interprocess_exception(err);
      }
      //Calculate flag
      int flag = map_options == default_map_options ? 0 : map_options;
      if(m_mode == read_only){
         flag |= SHM_RDONLY;
      }
      else if(m_mode != read_write){
         error_info err(mode_error);
         throw interprocess_exception(err);
      }
      //Attach memory
      void *base = ::shmat(map_hnd.handle, (void*)address, flag);
      if(base == (void*)-1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Update members
      m_base   = base;
      m_size   = size;
      m_mode   = mode;
      m_page_offset = 0;
      m_is_xsi = true;
      return;
   }
   #endif   //ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS

   //We calculate the difference between the demanded offset and the valid offset
   const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

   if(size == 0){
      struct ::stat buf;
      if(0 != fstat(map_hnd.handle, &buf)){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //This can throw
      priv_size_from_mapping_size(buf.st_size, offset, page_offset, size);
   }

   #ifdef MAP_NOSYNC
      #define BOOST_INTERPROCESS_MAP_NOSYNC MAP_NOSYNC
   #else
      #define BOOST_INTERPROCESS_MAP_NOSYNC 0
   #endif   //MAP_NOSYNC

   //Create new mapping
   int prot  = 0;
   int flags = map_options == default_map_options ? BOOST_INTERPROCESS_MAP_NOSYNC : map_options;
   #undef BOOST_INTERPROCESS_MAP_NOSYNC

   switch(mode)
   {
      case read_only:
         prot  |= PROT_READ;
         flags |= MAP_SHARED;
      break;

      case read_private:
         prot  |= (PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      case read_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_SHARED;
      break;

      case copy_on_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      default:
         {
            error_info err(mode_error);
            throw interprocess_exception(err);
         }
      break;
   }

   //Map it to the address space
   void* base = mmap( const_cast<void*>(address)
                    , static_cast<std::size_t>(page_offset + size)
                    , prot
                    , flags
                    , mapping.get_mapping_handle().handle
                    , offset - page_offset);

   //Check if mapping was successful
   if(base == MAP_FAILED){
      error_info err = system_error_code();
      throw interprocess_exception(err);
   }

   //Calculate new base for the user
   m_base = static_cast<char*>(base) + page_offset;
   m_page_offset = page_offset;
   m_size = size;

   //Check for fixed mapping error
   if(address && (base != address)){
      error_info err(busy_error);
      this->priv_close();
      throw interprocess_exception(err);
   }
}

inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(m_is_xsi || !this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In UNIX we can decommit and free virtual address space.
      return 0 == munmap(shrink_page_start, shrink_page_bytes);
   }
   else{
      return true;
   }
}

inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
{
   void *addr;
   if(m_is_xsi || !this->priv_flush_param_check(mapping_offset, addr, numbytes)){
      return false;
   }
   //Flush it all
   return msync(addr, numbytes, async ? MS_ASYNC : MS_SYNC) == 0;
}

inline bool mapped_region::advise(advice_types advice)
{
   int unix_advice = 0;
   //Modes: 0: none, 1: posix_madvise, 2: madvise
   const unsigned int mode_none = 0;
   const unsigned int mode_padv = 1;
   const unsigned int mode_madv = 2;
   // Suppress "unused variable" warnings
   (void)mode_padv;
   (void)mode_madv;
   unsigned int mode = mode_none;
   //Choose advice either from POSIX (preferred) or native Unix
   switch(advice){
      case advice_normal:
         #if defined(POSIX_MADV_NORMAL)
         unix_advice = POSIX_MADV_NORMAL;
         mode = mode_padv;
         #elif defined(MADV_NORMAL)
         unix_advice = MADV_NORMAL;
         mode = mode_madv;
         #endif
      break;
      case advice_sequential:
         #if defined(POSIX_MADV_SEQUENTIAL)
         unix_advice = POSIX_MADV_SEQUENTIAL;
         mode = mode_padv;
         #elif defined(MADV_SEQUENTIAL)
         unix_advice = MADV_SEQUENTIAL;
         mode = mode_madv;
         #endif
      break;
      case advice_random:
         #if defined(POSIX_MADV_RANDOM)
         unix_advice = POSIX_MADV_RANDOM;
         mode = mode_padv;
         #elif defined(MADV_RANDOM)
         unix_advice = MADV_RANDOM;
         mode = mode_madv;
         #endif
      break;
      case advice_willneed:
         #if defined(POSIX_MADV_WILLNEED)
         unix_advice = POSIX_MADV_WILLNEED;
         mode = mode_padv;
         #elif defined(MADV_WILLNEED)
         unix_advice = MADV_WILLNEED;
         mode = mode_madv;
         #endif
      break;
      case advice_dontneed:
         #if defined(POSIX_MADV_DONTNEED)
         unix_advice = POSIX_MADV_DONTNEED;
         mode = mode_padv;
         #elif defined(MADV_DONTNEED) && defined(BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS)
         unix_advice = MADV_DONTNEED;
         mode = mode_madv;
         #endif
      break;
      default:
         return false;
   }
   switch(mode){
      #if defined(POSIX_MADV_NORMAL)
      case mode_padv:
         return 0 == posix_madvise(this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      #if defined(MADV_NORMAL)
      case mode_madv:
         return 0 == madvise(
            #if defined(BOOST_INTERPROCESS_MADVISE_USES_CADDR_T)
            (caddr_t)
            #endif
            this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      default:
         return false;
   }
}

inline void mapped_region::priv_close()
{
   if(m_base != 0){
      #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
      if(m_is_xsi){
         int ret = ::shmdt(m_base);
         BOOST_ASSERT(ret == 0);
         (void)ret;
         return;
      }
      #endif   //#ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
      munmap(this->priv_map_address(), this->priv_map_size());
      m_base = 0;
   }
}

inline void mapped_region::dont_close_on_destruction()
{  m_base = 0;   }

#endif   //#if defined (BOOST_INTERPROCESS_WINDOWS)

template<int dummy>
const std::size_t mapped_region::page_size_holder<dummy>::PageSize
   = mapped_region::page_size_holder<dummy>::get_page_size();

inline std::size_t mapped_region::get_page_size()
{
   if(!page_size_holder<0>::PageSize)
      return page_size_holder<0>::get_page_size();
   else
      return page_size_holder<0>::PageSize;
}

inline void mapped_region::swap(mapped_region &other)
{
   ::boost::adl_move_swap(this->m_base, other.m_base);
   ::boost::adl_move_swap(this->m_size, other.m_size);
   ::boost::adl_move_swap(this->m_page_offset, other.m_page_offset);
   ::boost::adl_move_swap(this->m_mode, other.m_mode);
   #if defined (BOOST_INTERPROCESS_WINDOWS)
   ::boost::adl_move_swap(this->m_file_or_mapping_hnd, other.m_file_or_mapping_hnd);
   #else
   ::boost::adl_move_swap(this->m_is_xsi, other.m_is_xsi);
   #endif
}

//!No-op functor
struct null_mapped_region_function
{
   bool operator()(void *, std::size_t , bool) const
   {  return true;   }

   std::size_t get_min_size() const
   {  return 0;  }
};

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //BOOST_INTERPROCESS_MAPPED_REGION_HPP

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

#ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
#define BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP

#if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
#  include <boost/interprocess/sync/windows/sync_utils.hpp>
#  include <boost/interprocess/detail/windows_intermodule_singleton.hpp>

namespace boost {
namespace interprocess {

template<int Dummy>
inline void mapped_region::destroy_syncs_in_range(const void *addr, std::size_t size)
{
   ipcdetail::sync_handles &handles =
      ipcdetail::windows_intermodule_singleton<ipcdetail::sync_handles>::get();
   handles.destroy_syncs_in_range(addr, size);
}

}  //namespace interprocess {
}  //namespace boost {

#endif   //defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)

#endif   //#ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP

#endif   //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)