  1. /*
  2. * Copyright (C) 2015 Etnaviv Project
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License version 2 as published by
  6. * the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program. If not, see <http://www.gnu.org/licenses/>.
  15. */
  16. #ifndef __ETNAVIV_DRM_H__
  17. #define __ETNAVIV_DRM_H__
  18. #include "drm.h"
  19. #if defined(__cplusplus)
  20. extern "C" {
  21. #endif
  22. /* Please note that modifications to all structs defined here are
  23. * subject to backwards-compatibility constraints:
  24. * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
  25. * user/kernel compatibility
  26. * 2) Keep fields aligned to their size
  27. * 3) Because of how drm_ioctl() works, we can add new fields at
  28. * the end of an ioctl if some care is taken: drm_ioctl() will
  29. * zero out the new fields at the tail of the ioctl, so a zero
  30. * value should have a backwards compatible meaning. And for
  31. * output params, userspace won't see the newly added output
  32. * fields.. so that has to be somehow ok.
  33. */
  34. /* timeouts are specified in clock-monotonic absolute times (to simplify
  35. * restarting interrupted ioctls). The following struct is logically the
  36. * same as 'struct timespec' but 32/64b ABI safe.
  37. */
/*
 * 32/64-bit ABI-safe stand-in for 'struct timespec' (see comment above):
 * both fields are fixed-width __s64 so the layout is identical for 32-bit
 * and 64-bit userspace. Timeouts are absolute clock-monotonic times.
 */
struct drm_etnaviv_timespec {
	__s64 tv_sec;  /* seconds */
	__s64 tv_nsec; /* nanoseconds */
};
/*
 * Parameter identifiers for struct drm_etnaviv_param::param.
 * NOTE: the numbering jumps from 0x09 to 0x10 — values 0x0a..0x0f are
 * unassigned; do not "fill the gap" when adding new parameters.
 */
/* GPU identification and feature words */
#define ETNAVIV_PARAM_GPU_MODEL 0x01
#define ETNAVIV_PARAM_GPU_REVISION 0x02
#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03
#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04
#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
/* GPU capacity/size parameters */
#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12
#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13
#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14
#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15
#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a

/* Upper bound on the 'pipe' index used throughout this interface. */
#define ETNA_MAX_PIPES 4

/* Argument for DRM_IOCTL_ETNAVIV_GET_PARAM: query one parameter value. */
struct drm_etnaviv_param {
	__u32 pipe;  /* in */
	__u32 param; /* in, ETNAVIV_PARAM_x */
	__u64 value; /* out (get_param) or in (set_param) */
};
  68. /*
  69. * GEM buffers:
  70. */
/* Mask covering the mutually exclusive cache-mode bits below. */
#define ETNA_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define ETNA_BO_CACHED 0x00010000
#define ETNA_BO_WC 0x00020000
#define ETNA_BO_UNCACHED 0x00040000
/* map flags */
#define ETNA_BO_FORCE_MMU 0x00100000

/* Argument for DRM_IOCTL_ETNAVIV_GEM_NEW: allocate a GEM buffer object. */
struct drm_etnaviv_gem_new {
	__u64 size;   /* in */
	__u32 flags;  /* in, mask of ETNA_BO_x */
	__u32 handle; /* out */
};
/*
 * Argument for DRM_IOCTL_ETNAVIV_GEM_INFO: look up the offset that must be
 * passed to mmap() on the DRM fd in order to map the object.
 */
struct drm_etnaviv_gem_info {
	__u32 handle; /* in */
	__u32 pad;    /* explicit padding so 'offset' stays 8-byte aligned */
	__u64 offset; /* out, offset to pass to mmap() */
};
/* Access-type flags for drm_etnaviv_gem_cpu_prep::op. */
#define ETNA_PREP_READ 0x01
#define ETNA_PREP_WRITE 0x02
#define ETNA_PREP_NOSYNC 0x04

/*
 * Argument for DRM_IOCTL_ETNAVIV_GEM_CPU_PREP: prepare a buffer object for
 * CPU access (see the synchronization comment further below), waiting at
 * most until the given absolute monotonic timeout.
 */
struct drm_etnaviv_gem_cpu_prep {
	__u32 handle; /* in */
	__u32 op;     /* in, mask of ETNA_PREP_x */
	struct drm_etnaviv_timespec timeout; /* in */
};
/*
 * Argument for DRM_IOCTL_ETNAVIV_GEM_CPU_FINI: end a CPU access window
 * previously opened with DRM_IOCTL_ETNAVIV_GEM_CPU_PREP.
 */
struct drm_etnaviv_gem_cpu_fini {
	__u32 handle; /* in */
	__u32 flags;  /* in, placeholder for now, no defined values */
};
  100. /*
  101. * Cmdstream Submission:
  102. */
  103. /* The value written into the cmdstream is logically:
  104. * relocbuf->gpuaddr + reloc_offset
  105. *
  106. * NOTE that reloc's must be sorted by order of increasing submit_offset,
  107. * otherwise EINVAL.
  108. */
/*
 * One relocation entry: the dword at 'submit_offset' in the cmdstream is
 * patched with bos[reloc_idx]'s GPU address plus 'reloc_offset' (see the
 * comment above). Entries must be sorted by increasing submit_offset.
 *
 * NOTE(review): the struct ends on a __u32 following a __u64, so overall
 * size/trailing padding may differ between 32- and 64-bit ABIs — verify
 * against the kernel's copy_from_user sizing before relying on sizeof.
 */
struct drm_etnaviv_gem_submit_reloc {
	__u32 submit_offset; /* in, offset from submit_bo */
	__u32 reloc_idx;     /* in, index of reloc_bo buffer */
	__u64 reloc_offset;  /* in, offset from start of reloc_bo */
	__u32 flags;         /* in, placeholder for now, no defined values */
};
  115. /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
  116. * cmdstream buffer(s) themselves or reloc entries) has one (and only
  117. * one) entry in the submit->bos[] table.
  118. *
  119. * As a optimization, the current buffer (gpu virtual address) can be
  120. * passed back through the 'presumed' field. If on a subsequent reloc,
  121. * userspace passes back a 'presumed' address that is still valid,
  122. * then patching the cmdstream for this entry is skipped. This can
  123. * avoid kernel needing to map/access the cmdstream bo in the common
  124. * case.
  125. */
/* Access flags for drm_etnaviv_gem_submit_bo::flags. */
#define ETNA_SUBMIT_BO_READ 0x0001
#define ETNA_SUBMIT_BO_WRITE 0x0002

/* One entry of the submit->bos[] table described in the comment above. */
struct drm_etnaviv_gem_submit_bo {
	__u32 flags;    /* in, mask of ETNA_SUBMIT_BO_x */
	__u32 handle;   /* in, GEM handle */
	__u64 presumed; /* in/out, presumed buffer address */
};
  133. /* Each cmdstream submit consists of a table of buffers involved, and
  134. * one or more cmdstream buffers. This allows for conditional execution
  135. * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  136. */
/* Initial execution states for drm_etnaviv_gem_submit::exec_state. */
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02

/*
 * Argument for DRM_IOCTL_ETNAVIV_GEM_SUBMIT: submit one cmdstream together
 * with its buffer table and relocation table. 'bos', 'relocs' and 'stream'
 * are user pointers carried as __u64 for 32/64-bit compatibility.
 */
struct drm_etnaviv_gem_submit {
	__u32 fence;       /* out */
	__u32 pipe;        /* in */
	__u32 exec_state;  /* in, initial execution state (ETNA_PIPE_x) */
	__u32 nr_bos;      /* in, number of submit_bo's */
	__u32 nr_relocs;   /* in, number of submit_reloc's */
	__u32 stream_size; /* in, cmdstream size */
	__u64 bos;         /* in, ptr to array of submit_bo's */
	__u64 relocs;      /* in, ptr to array of submit_reloc's */
	__u64 stream;      /* in, ptr to cmdstream */
};
  151. /* The normal way to synchronize with the GPU is just to CPU_PREP on
  152. * a buffer if you need to access it from the CPU (other cmdstream
  153. * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
  154. * handle the required synchronization under the hood). This ioctl
  155. * mainly just exists as a way to implement the gallium pipe_fence
  156. * APIs without requiring a dummy bo to synchronize on.
  157. */
/* Flag for the fence/bo wait ioctls: poll instead of blocking. */
#define ETNA_WAIT_NONBLOCK 0x01

/*
 * Argument for DRM_IOCTL_ETNAVIV_WAIT_FENCE: wait until the fence value
 * returned by a previous GEM_SUBMIT on the given pipe has signalled.
 */
struct drm_etnaviv_wait_fence {
	__u32 pipe;  /* in */
	__u32 fence; /* in */
	__u32 flags; /* in, mask of ETNA_WAIT_x */
	__u32 pad;   /* explicit padding so 'timeout' stays 8-byte aligned */
	struct drm_etnaviv_timespec timeout; /* in */
};
/* Access flags for drm_etnaviv_gem_userptr::flags. */
#define ETNA_USERPTR_READ 0x01
#define ETNA_USERPTR_WRITE 0x02

/*
 * Argument for DRM_IOCTL_ETNAVIV_GEM_USERPTR: create a GEM object backed
 * by a page-aligned range of user memory.
 */
struct drm_etnaviv_gem_userptr {
	__u64 user_ptr;  /* in, page aligned user pointer */
	__u64 user_size; /* in, page aligned user size */
	__u32 flags;     /* in, flags */
	__u32 handle;    /* out, non-zero handle */
};
/*
 * Argument for DRM_IOCTL_ETNAVIV_GEM_WAIT: wait for GPU work involving the
 * given buffer object to finish, up to the absolute monotonic timeout.
 */
struct drm_etnaviv_gem_wait {
	__u32 pipe;   /* in */
	__u32 handle; /* in, bo to be waited for */
	__u32 flags;  /* in, mask of ETNA_WAIT_x */
	__u32 pad;    /* explicit padding so 'timeout' stays 8-byte aligned */
	struct drm_etnaviv_timespec timeout; /* in */
};
/*
 * ioctl numbers, relative to DRM_COMMAND_BASE. 0x01 is permanently
 * reserved for the commented-out SET_PARAM placeholder below — do not
 * reuse it.
 */
#define DRM_ETNAVIV_GET_PARAM 0x00
/* placeholder:
#define DRM_ETNAVIV_SET_PARAM 0x01
*/
#define DRM_ETNAVIV_GEM_NEW 0x02
#define DRM_ETNAVIV_GEM_INFO 0x03
#define DRM_ETNAVIV_GEM_CPU_PREP 0x04
#define DRM_ETNAVIV_GEM_CPU_FINI 0x05
#define DRM_ETNAVIV_GEM_SUBMIT 0x06
#define DRM_ETNAVIV_WAIT_FENCE 0x07
#define DRM_ETNAVIV_GEM_USERPTR 0x08
#define DRM_ETNAVIV_GEM_WAIT 0x09
#define DRM_ETNAVIV_NUM_IOCTLS 0x0a

/*
 * Full ioctl request codes: DRM_IOWR where the struct carries 'out'
 * fields back to userspace, DRM_IOW where it is input-only.
 */
#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
  203. #if defined(__cplusplus)
  204. }
  205. #endif
  206. #endif /* __ETNAVIV_DRM_H__ */