hab.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __HAB_H
#define __HAB_H

#include "hab_os.h" /* OS-specific part in the core header file */

enum hab_payload_type {
	HAB_PAYLOAD_TYPE_MSG = 0x0,
	HAB_PAYLOAD_TYPE_INIT,
	HAB_PAYLOAD_TYPE_INIT_ACK,
	HAB_PAYLOAD_TYPE_INIT_DONE,
	HAB_PAYLOAD_TYPE_EXPORT,
	HAB_PAYLOAD_TYPE_EXPORT_ACK,
	HAB_PAYLOAD_TYPE_PROFILE,
	HAB_PAYLOAD_TYPE_CLOSE,
	HAB_PAYLOAD_TYPE_INIT_CANCEL,
	HAB_PAYLOAD_TYPE_SCHE_MSG,
	HAB_PAYLOAD_TYPE_SCHE_MSG_ACK,
	HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ,
	HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP,
	HAB_PAYLOAD_TYPE_IMPORT,
	HAB_PAYLOAD_TYPE_IMPORT_ACK,
	HAB_PAYLOAD_TYPE_IMPORT_ACK_FAIL,
	HAB_PAYLOAD_TYPE_UNIMPORT,
	HAB_PAYLOAD_TYPE_MAX,
};

#define LOOPBACK_DOM 0xFF

/*
 * Tuning required. If there are multiple clients, an aged previous
 * "request" might be discarded.
 */
#define Q_AGE_THRESHOLD 1000000

/* the names must match the dtsi entries when a real HYP framework is used */
#define DEVICE_AUD1_NAME "hab_aud1"
#define DEVICE_AUD2_NAME "hab_aud2"
#define DEVICE_AUD3_NAME "hab_aud3"
#define DEVICE_AUD4_NAME "hab_aud4"
#define DEVICE_CAM1_NAME "hab_cam1"
#define DEVICE_CAM2_NAME "hab_cam2"
#define DEVICE_DISP1_NAME "hab_disp1"
#define DEVICE_DISP2_NAME "hab_disp2"
#define DEVICE_DISP3_NAME "hab_disp3"
#define DEVICE_DISP4_NAME "hab_disp4"
#define DEVICE_DISP5_NAME "hab_disp5"
#define DEVICE_GFX_NAME "hab_ogles"
#define DEVICE_VID_NAME "hab_vid"
#define DEVICE_VID2_NAME "hab_vid2"
#define DEVICE_VID3_NAME "hab_vid3"
#define DEVICE_MISC_NAME "hab_misc"
#define DEVICE_QCPE1_NAME "hab_qcpe_vm1"
#define DEVICE_CLK1_NAME "hab_clock_vm1"
#define DEVICE_CLK2_NAME "hab_clock_vm2"
#define DEVICE_FDE1_NAME "hab_fde1"
#define DEVICE_BUFFERQ1_NAME "hab_bufferq1"
#define DEVICE_DATA1_NAME "hab_data_network1"
#define DEVICE_DATA2_NAME "hab_data_network2"
#define DEVICE_HSI2S1_NAME "hab_hsi2s1"
#define DEVICE_XVM1_NAME "hab_xvm1"
#define DEVICE_XVM2_NAME "hab_xvm2"
#define DEVICE_XVM3_NAME "hab_xvm3"
#define DEVICE_VNW1_NAME "hab_vnw1"
#define DEVICE_EXT1_NAME "hab_ext1"
#define DEVICE_GPCE1_NAME "hab_gpce1"

#define HABCFG_MMID_NUM 26
#define HAB_MMID_ALL_AREA 0

/* make sure the concatenated name stays below this size */
#define MAX_VMID_NAME_SIZE 30

/*
 * The maximum value of payload_count in struct export_desc:
 * max u32 size_bytes from hab_ioctl.h (0xFFFFFFFF) / page size (0x1000)
 */
#define MAX_EXP_PAYLOAD_COUNT 0xFFFFF

#define HABCFG_FILE_SIZE_MAX 256

#define HABCFG_MMID_AREA_MAX (MM_ID_MAX/100)

#define HABCFG_VMID_MAX 16
#define HABCFG_VMID_INVALID (-1)
#define HABCFG_VMID_DONT_CARE (-2)

#define HABCFG_ID_LINE_LIMIT ","
#define HABCFG_ID_VMID "VMID="
#define HABCFG_ID_BE "BE="
#define HABCFG_ID_FE "FE="
#define HABCFG_ID_MMID "MMID="
#define HABCFG_ID_RANGE "-"
#define HABCFG_ID_DONTCARE "X"

#define HABCFG_FOUND_VMID 1
#define HABCFG_FOUND_FE_MMIDS 2
#define HABCFG_FOUND_BE_MMIDS 3
#define HABCFG_FOUND_NOTHING (-1)

#define HABCFG_BE_FALSE 0
#define HABCFG_BE_TRUE 1

#define HABCFG_GET_VMID(_local_cfg_, _vmid_) \
	((settings)->vmid_mmid_list[_vmid_].vmid)
#define HABCFG_GET_MMID(_local_cfg_, _vmid_, _mmid_) \
	((settings)->vmid_mmid_list[_vmid_].mmid[_mmid_])
#define HABCFG_GET_BE(_local_cfg_, _vmid_, _mmid_) \
	((settings)->vmid_mmid_list[_vmid_].is_listener[_mmid_])
#define HABCFG_GET_KERNEL(_local_cfg_, _vmid_, _mmid_) \
	((settings)->vmid_mmid_list[_vmid_].kernel_only[_mmid_])
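
/*
 * Note: the HABCFG_GET_* accessors above read a variable named "settings"
 * from the caller's scope rather than their _local_cfg_ argument, so they
 * only work where such a struct local_vmid pointer named "settings" is
 * visible. Illustrative use, a sketch only and not taken from the driver
 * sources:
 *
 *	struct local_vmid *settings = &hab_driver.settings;
 *	int remote_vmid = HABCFG_GET_VMID(settings, i);
 *	int selected    = HABCFG_GET_MMID(settings, i, j);
 *	int is_be       = HABCFG_GET_BE(settings, i, j);
 */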

struct hab_header {
	uint32_t id_type;
	uint32_t payload_size;
	uint32_t session_id;
	uint32_t signature;
	uint32_t sequence;
} __packed;

/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */
#define HAB_HEADER_TYPE_SHIFT 16
#define HAB_HEADER_EXT_TYPE_SHIFT 0
#define HAB_HEADER_ID_SHIFT 20
/*
 * On HQX platforms, the maximum payload size is
 * PIPE_SHMEM_SIZE - sizeof(hab_header).
 * 500KB is big enough for now and leaves a margin for other usage.
 */
#define HAB_HEADER_SIZE_MAX 0x0007D000
#define HAB_HEADER_TYPE_MASK 0x000F0000
/* TYPE_LEN is the number of 1 bits in TYPE_MASK */
#define HAB_HEADER_TYPE_LEN 4
#define HAB_HEADER_EXT_TYPE_MASK 0x0000000F
#define HAB_HEADER_ID_MASK 0xFFF00000
#define HAB_HEADER_INITIALIZER {0}

#define HAB_MMID_GET_MAJOR(mmid) (mmid & 0xFFFF)
#define HAB_MMID_GET_MINOR(mmid) ((mmid>>16) & 0xFF)

#define HAB_VCID_ID_SHIFT 0
#define HAB_VCID_DOMID_SHIFT 12
#define HAB_VCID_MMID_SHIFT 20
#define HAB_VCID_ID_MASK 0x00000FFF
#define HAB_VCID_DOMID_MASK 0x000FF000
#define HAB_VCID_MMID_MASK 0xFFF00000
#define HAB_VCID_GET_ID(vcid) \
	(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
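
/*
 * A virtual channel id (vcid) packs three fields, per the masks above:
 *	bits  0..11	local id   (HAB_VCID_ID_MASK)
 *	bits 12..19	domain id  (HAB_VCID_DOMID_MASK)
 *	bits 20..31	MMID       (HAB_VCID_MMID_MASK)
 * Illustrative decode (a sketch, not part of the original API):
 *
 *	uint32_t id    = (vcid & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT;
 *	uint32_t domid = (vcid & HAB_VCID_DOMID_MASK) >> HAB_VCID_DOMID_SHIFT;
 *	uint32_t mmid  = (vcid & HAB_VCID_MMID_MASK) >> HAB_VCID_MMID_SHIFT;
 */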

#define HAB_HEADER_SET_SESSION_ID(header, sid) \
	((header).session_id = (sid))

#define HAB_HEADER_SET_SIZE(header, size) \
	((header).payload_size = (size))

#define HAB_HEADER_SET_TYPE(header, type) \
	((header).id_type = ((header).id_type & \
	(~(HAB_HEADER_TYPE_MASK | HAB_HEADER_EXT_TYPE_MASK))) | \
	(((type) << HAB_HEADER_TYPE_SHIFT) & \
	HAB_HEADER_TYPE_MASK) | \
	((((type) >> HAB_HEADER_TYPE_LEN) << HAB_HEADER_EXT_TYPE_SHIFT) & \
	HAB_HEADER_EXT_TYPE_MASK))

#define HAB_HEADER_SET_ID(header, id) \
	((header).id_type = ((header).id_type & \
	(~HAB_HEADER_ID_MASK)) | \
	((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) & \
	HAB_HEADER_ID_MASK))

#define HAB_HEADER_GET_SIZE(header) \
	((header).payload_size)

#define HAB_HEADER_GET_TYPE(header) \
	((((header).id_type & \
	HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT) | \
	(((header).id_type & HAB_HEADER_EXT_TYPE_MASK) << HAB_HEADER_TYPE_LEN))

#define HAB_HEADER_GET_ID(header) \
	((((header).id_type & HAB_HEADER_ID_MASK) >> \
	(HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK)

#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
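
/*
 * The payload type is wider than the 4-bit TYPE field (types at or above 16,
 * e.g. HAB_PAYLOAD_TYPE_UNIMPORT, no longer fit), so SET/GET_TYPE split it:
 * the low HAB_HEADER_TYPE_LEN bits land in TYPE (bits 16..19 of id_type) and
 * the upper bits in EXT_TYPE (bits 0..3). Worked example, illustrative only:
 *
 *	struct hab_header header = HAB_HEADER_INITIALIZER;
 *
 *	HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_UNIMPORT);
 *	With type = 16: TYPE field = 16 & 0xF = 0, EXT_TYPE field = 16 >> 4 = 1,
 *	and HAB_HEADER_GET_TYPE(header) recombines 0 | (1 << 4) back to 16.
 */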

#define HAB_HS_TIMEOUT (10*1000*1000)

#define HAB_HEAD_SIGNATURE 0xBEE1BEE1

/* only used when the vchan does not exist */
#define HAB_VCID_UNIMPORT 0x1
#define HAB_SESSIONID_UNIMPORT 0x1

/* 1 - enhanced memory sharing protocol with sync import and async unimport */
#define HAB_VER_PROT 1

struct physical_channel {
	struct list_head node;
	char name[MAX_VMID_NAME_SIZE];
	int is_be;
	struct kref refcount;
	struct hab_device *habdev;
	struct idr vchan_idr;
	spinlock_t vid_lock;
	struct idr expid_idr;
	spinlock_t expid_lock;
	void *hyp_data;
	int dom_id; /* BE role: remote vmid; FE role: don't care */
	int vmid_local; /* from DT or hab_config */
	int vmid_remote;
	char vmname_local[12]; /* from DT */
	char vmname_remote[12];
	int closed;
	spinlock_t rxbuf_lock;

	/* debug only */
	uint32_t sequence_tx;
	uint32_t sequence_rx;
	uint32_t status;

	/* vchans on this pchan */
	struct list_head vchannels;
	int vcnt;
	rwlock_t vchans_lock;

	int kernel_only;
	uint32_t mem_proto;
};

/* this payload has to be used together with type */
struct hab_open_send_data {
	int vchan_id;
	int sub_id;
	int open_id;
	int ver_fe;
	int ver_be;
	int ver_proto;
};

struct hab_open_request {
	int type;
	struct physical_channel *pchan;
	struct hab_open_send_data xdata;
};

struct hab_open_node {
	struct hab_open_request request;
	struct list_head node;
	int64_t age; /* sec */
};

struct hab_export_ack {
	uint32_t export_id;
	int32_t vcid_local;
	int32_t vcid_remote;
};

struct hab_export_ack_recvd {
	struct hab_export_ack ack;
	struct list_head node;
	int age;
};

struct hab_import_ack {
	uint32_t export_id;
	int32_t vcid_local;
	int32_t vcid_remote;
	uint32_t imp_whse_added; /* indicating exp node added into imp whse */
};

struct hab_import_ack_recvd {
	struct hab_import_ack ack;
	struct list_head node;
	int age;
};

struct hab_import_data {
	uint32_t exp_id;
	uint32_t page_cnt;
	uint32_t reserved;
} __packed;

struct hab_message {
	struct list_head node;
	size_t sizebytes;
	bool scatter;
	uint32_t sequence_rx;
	uint32_t data[];
};

/* for all the pchans of same kind */
struct hab_device {
	char name[MAX_VMID_NAME_SIZE];
	uint32_t id;
	struct list_head pchannels;
	int pchan_cnt;
	rwlock_t pchan_lock;
	struct list_head openq_list; /* received */
	spinlock_t openlock;
	wait_queue_head_t openq;
	int openq_cnt;
};

struct uhab_context {
	struct list_head node; /* managed by the driver */
	struct kref refcount;
	struct work_struct destroy_work;

	struct list_head vchannels;
	int vcnt;

	struct list_head exp_whse;
	rwlock_t exp_lock;
	uint32_t export_total;

	wait_queue_head_t exp_wq;
	struct list_head exp_rxq;
	spinlock_t expq_lock;

	struct list_head imp_whse;
	spinlock_t imp_lock;
	uint32_t import_total;

	wait_queue_head_t imp_wq;
	struct list_head imp_rxq;
	spinlock_t impq_lock;
	void *import_ctx;

	struct list_head pending_open; /* sent to remote */
	int pending_cnt;

	rwlock_t ctx_lock;
	int closing;
	int kernel;
	int owner;

	/*
	 * only used for user-space hab client
	 * if created through /dev/hab-* node, mmid_grp_index = MMID / 100
	 * if created through /dev/hab node, mmid_grp_index = 0
	 */
	int mmid_grp_index;

	int lb_be; /* loopback only */
};

/*
 * array describing the VM and its MMID configuration that this side is
 * connected to, i.e. it describes a pchan's remote side
 */
struct vmid_mmid_desc {
	int vmid; /* remote vmid */
	int mmid[HABCFG_MMID_AREA_MAX+1]; /* selected or not */
	int is_listener[HABCFG_MMID_AREA_MAX+1]; /* yes or no */
	int kernel_only[HABCFG_MMID_AREA_MAX+1]; /* yes or no */
};

struct local_vmid {
	int32_t self; /* only this field is for the local side */
	struct vmid_mmid_desc vmid_mmid_list[HABCFG_VMID_MAX];
};

struct hab_driver {
	/* the hab driver has many char devices, so an array of struct device pointers is needed */
	struct device **dev;
	struct cdev *cdev;
	dev_t major;
	struct class *class;
	int ndevices;
	struct hab_device *devp;
	struct uhab_context *kctx;

	struct list_head uctx_list;
	int ctx_cnt;
	spinlock_t drvlock;

	struct list_head imp_list;
	int imp_cnt;
	spinlock_t imp_lock;

	struct list_head reclaim_list;
	spinlock_t reclaim_lock;
	struct work_struct reclaim_work;

	struct local_vmid settings; /* parser results */

	int b_server_dom;
	int b_loopback_be; /* only allow 2 apps simultaneously: 1 FE, 1 BE */
	int b_loopback;

	void *hyp_priv; /* hypervisor plug-in storage */

	void *hab_vmm_handle;
	int hab_init_success;
};

struct virtual_channel {
	struct list_head node; /* for ctx */
	struct list_head pnode; /* for pchan */
	/*
	 * refcount is used to track the references from hab core to the virtual
	 * channel, such as references from physical channels,
	 * i.e. references from the "other" side
	 */
	struct kref refcount;
	struct physical_channel *pchan;
	struct uhab_context *ctx;
	struct list_head rx_list;
	wait_queue_head_t rx_queue;
	spinlock_t rx_lock;
	int id;
	int otherend_id;
	int otherend_closed;
	uint32_t session_id;

	/*
	 * set when the local close() is called explicitly. The vchan could be
	 * in use in the hab-recv-msg() path (2) when close() is called (1);
	 * this is the same case as when close() is not called and there is no
	 * msg path.
	 */
	int closed;
	int forked; /* set if a fork is detected; assumed to happen only once */

	/* stats */
	atomic64_t tx_cnt; /* total successful tx */
	atomic64_t rx_cnt; /* total successful rx */
	int rx_inflight; /* rx in progress/blocking */
};

/*
 * Struct shared between local and remote; its contents are composed by the
 * exporter, and the importer only writes to pdata and the local (exporter)
 * domID
 */
struct export_desc {
	uint32_t export_id;
	int readonly;
	uint64_t import_index;

	struct virtual_channel *vchan; /* vchan could be freed earlier */
	struct uhab_context *ctx;
	struct physical_channel *pchan;

	int32_t vcid_local;
	int32_t vcid_remote;
	int domid_local;
	int domid_remote;
	int flags;

	struct list_head node;
	void *kva;
	int payload_count; /* number of pages */
	unsigned char payload[1];
} __packed;

/*
 * hab_mem_import          hab_mem_unimport
 * --------------          ----------------
 * lock                    lock
 * query                   query
 * unlock                  unlock
 *
 * use                     free
 *
 * ret                     ret
 *
 * There are three scenarios to handle.
 * The first is:
 * 1. thread1 enters import and finds the exp desc, then unlocks,
 * 2. thread2 is scheduled to run on the same CPU,
 * 3. it enters unimport, finds the same exp desc, frees it and returns,
 * 4. the CPU goes back to running thread1,
 * 5. a UAF occurs once thread1 uses this exp desc.
 * We could set EXP_DESC_IMPORTED at the end of import and add a query check
 * in unimport to synchronize this access.
 * A more complicated case is:
 * 1. thread1 has completed the import,
 * 2. thread2 enters import and gets the exp desc,
 * 3. at this point, thread3, which calls unimport, can find this exp desc
 * because its current state is EXP_DESC_IMPORTED,
 * 4. if thread3 frees it and thread2 uses it afterwards, a UAF also occurs.
 * Adding a query check against EXP_DESC_IMPORTED in import could avoid this,
 * but it cannot deal with the third scenario:
 * 1. thread1 and thread2 both call import and both find this exp desc,
 * 2. thread1 runs quickly and returns from import,
 * 3. then thread3 calls unimport and frees the exp desc,
 * 4. a UAF occurs once thread2 uses this exp desc afterwards.
 * In import, querying the exp desc is a critical section; thread2 must be
 * prevented from entering it while thread1 is in. Hence EXP_DESC_IMPORTING.
 */
enum exp_desc_state {
	EXP_DESC_INIT,
	EXP_DESC_IMPORTING, /* hab_mem_import is in progress */
	EXP_DESC_IMPORTED, /* hab_mem_import is called and returns success */
};
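
/*
 * Minimal sketch of how these states could serialize the import path
 * described above; find_exp_super() is a hypothetical lookup helper and this
 * is not the driver's actual implementation:
 *
 *	spin_lock(&ctx->imp_lock);
 *	exp_super = find_exp_super(ctx, exp_id);
 *	if (!exp_super || exp_super->import_state != EXP_DESC_INIT) {
 *		spin_unlock(&ctx->imp_lock);
 *		return -EBUSY;	(another thread is importing or has imported)
 *	}
 *	exp_super->import_state = EXP_DESC_IMPORTING;
 *	spin_unlock(&ctx->imp_lock);
 *	... map the pages outside the lock ...
 *	exp_super->import_state = EXP_DESC_IMPORTED;
 */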

enum export_state {
	HAB_EXP_EXPORTING,
	HAB_EXP_SUCCESS,
};

struct export_desc_super {
	struct kref refcount;
	void *platform_data;
	unsigned long offset;
	unsigned int payload_size; /* size of the compressed pfn structure */

	enum exp_desc_state import_state;
	enum export_state exp_state;
	uint32_t remote_imported;

	/*
	 * exp must be the last member
	 * because it is a variable length struct with pfns as payload
	 */
	struct export_desc exp;
};
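
/*
 * Because exp is the last member, a driver-private super can be recovered
 * from a plain struct export_desc pointer with container_of(). A sketch of
 * the typical pattern (an assumption, not copied from the driver):
 *
 *	struct export_desc_super *exp_super =
 *		container_of(exp, struct export_desc_super, exp);
 */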

int hab_vchan_open(struct uhab_context *ctx,
		unsigned int mmid, int32_t *vcid,
		int32_t timeout, uint32_t flags);
int hab_vchan_close(struct uhab_context *ctx,
		int32_t vcid);
long hab_vchan_send(struct uhab_context *ctx,
		int vcid,
		size_t sizebytes,
		void *data,
		unsigned int flags);
int hab_vchan_recv(struct uhab_context *ctx,
		struct hab_message **msg,
		int vcid,
		int *rsize,
		unsigned int timeout,
		unsigned int flags);
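
/*
 * Typical kernel-side flow through the vchan API above; an illustrative
 * sketch only, with mmid, timeout_ms and payload as placeholders:
 *
 *	int32_t vcid;
 *	struct hab_message *msg;
 *	int rsize;
 *
 *	ret = hab_vchan_open(ctx, mmid, &vcid, timeout_ms, 0);
 *	ret = hab_vchan_send(ctx, vcid, sizeof(payload), &payload, 0);
 *	ret = hab_vchan_recv(ctx, &msg, vcid, &rsize, timeout_ms, 0);
 *	hab_msg_free(msg);
 *	ret = hab_vchan_close(ctx, vcid);
 */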
void hab_vchan_stop(struct virtual_channel *vchan);
void hab_vchans_stop(struct physical_channel *pchan);
void hab_vchan_stop_notify(struct virtual_channel *vchan);
void hab_vchans_empty_wait(int vmid);

int hab_mem_export(struct uhab_context *ctx,
		struct hab_export *param, int kernel);
int hab_mem_import(struct uhab_context *ctx,
		struct hab_import *param, int kernel);
int hab_mem_unexport(struct uhab_context *ctx,
		struct hab_unexport *param, int kernel);
void habmem_export_get(struct export_desc_super *exp_super);
int habmem_export_put(struct export_desc_super *exp_super);

int hab_mem_unimport(struct uhab_context *ctx,
		struct hab_unimport *param, int kernel);

void habmem_remove_export(struct export_desc *exp);
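
/*
 * Memory sharing pairs an export on one VM with an import on the peer VM,
 * keyed by an export id (see struct hab_export/hab_import in hab_ioctl.h).
 * Rough sequence, illustrative only and not authoritative:
 *
 *	exporter: hab_mem_export(ctx, &exp_param, kernel)
 *		  the resulting export id is passed to the peer (e.g. in a
 *		  hab message over the vchan)
 *	importer: hab_mem_import(ctx, &imp_param, kernel)
 *		  maps the shared pages described by that export id
 *	importer: hab_mem_unimport(ctx, &unimp_param, kernel)
 *	exporter: hab_mem_unexport(ctx, &unexp_param, kernel)
 */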

/* memory hypervisor framework plugin I/F */
struct export_desc_super *habmem_add_export(
		struct virtual_channel *vchan,
		int sizebytes,
		uint32_t flags);
int habmem_hyp_grant_user(struct virtual_channel *vchan,
		unsigned long address,
		int page_count,
		int flags,
		int remotedom,
		int *compressed,
		int *compressed_size,
		int *export_id);
int habmem_hyp_grant(struct virtual_channel *vchan,
		unsigned long address,
		int page_count,
		int flags,
		int remotedom,
		int *compressed,
		int *compressed_size,
		int *export_id);
int habmem_hyp_revoke(void *expdata, uint32_t count);
int habmem_exp_release(struct export_desc_super *exp_super);

void *habmem_imp_hyp_open(void);
void habmem_imp_hyp_close(void *priv, int kernel);
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
		struct export_desc *exp, int kernel);
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel);
int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma);
int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp);

void hab_msg_free(struct hab_message *message);
int hab_msg_dequeue(struct virtual_channel *vchan,
		struct hab_message **msg, int *rsize, unsigned int timeout,
		unsigned int flags);
int hab_msg_recv(struct physical_channel *pchan,
		struct hab_header *header);

void hab_open_request_init(struct hab_open_request *request,
		int type,
		struct physical_channel *pchan,
		int vchan_id,
		int sub_id,
		int open_id);
int hab_open_request_send(struct hab_open_request *request);
int hab_open_request_add(struct physical_channel *pchan,
		size_t sizebytes, int request_type);
void hab_open_request_free(struct hab_open_request *request);
int hab_open_listen(struct uhab_context *ctx,
		struct hab_device *dev,
		struct hab_open_request *listen,
		struct hab_open_request **recv_request,
		int ms_timeout,
		uint32_t flags);

struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
		struct physical_channel *pchan, int openid);
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
		struct hab_header *header);
void hab_vchan_put(struct virtual_channel *vchan);

struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
		struct uhab_context *ctx, int ignore_remote);

struct physical_channel *hab_pchan_alloc(struct hab_device *habdev,
		int otherend_id);
struct physical_channel *hab_pchan_find_domid(struct hab_device *dev,
		int dom_id);
int hab_vchan_find_domid(struct virtual_channel *vchan);

void hab_pchan_get(struct physical_channel *pchan);
void hab_pchan_put(struct physical_channel *pchan);

struct uhab_context *hab_ctx_alloc(int kernel);

void hab_ctx_free(struct kref *ref);
void hab_ctx_free_fn(struct uhab_context *ctx);
void hab_ctx_free_os(struct kref *ref);

static inline void hab_ctx_get(struct uhab_context *ctx)
{
	if (ctx)
		kref_get(&ctx->refcount);
}

static inline void hab_ctx_put(struct uhab_context *ctx)
{
	if (ctx)
		kref_put(&ctx->refcount, hab_ctx_free);
}

void hab_send_close_msg(struct virtual_channel *vchan);
void hab_send_unimport_msg(struct virtual_channel *vchan, uint32_t exp_id);

int hab_hypervisor_register(void);
int hab_hypervisor_register_post(void);
int hab_hypervisor_register_os(void);
int hab_hypervisor_unregister_os(void);
void hab_hypervisor_unregister(void);
void hab_hypervisor_unregister_common(void);

int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
		int vmid_remote, struct hab_device *mmid_device);
int habhyp_commdev_dealloc(void *commdev);
void habhyp_commdev_dealloc_os(void *commdev);
int habhyp_commdev_create_dispatcher(struct physical_channel *pchan);

int physical_channel_read(struct physical_channel *pchan,
		void *payload,
		size_t read_size);
int physical_channel_send(struct physical_channel *pchan,
		struct hab_header *header,
		void *payload,
		unsigned int flags);
void physical_channel_rx_dispatch(unsigned long physical_channel);
void physical_channel_rx_dispatch_common(unsigned long physical_channel);

int loopback_pchan_create(struct hab_device *dev, char *pchan_name);

int hab_parse(struct local_vmid *settings);
int do_hab_parse(void);

int fill_default_gvm_settings(struct local_vmid *settings,
		int vmid_local, int mmid_start, int mmid_end);

bool hab_is_loopback(void);

int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
		char *names, size_t name_size, uint32_t flags);

struct hab_device *find_hab_device(unsigned int mm_id);

unsigned int get_refcnt(struct kref ref);

int hab_open_pending_enter(struct uhab_context *ctx,
		struct physical_channel *pchan,
		struct hab_open_node *pending);
int hab_open_pending_exit(struct uhab_context *ctx,
		struct physical_channel *pchan,
		struct hab_open_node *pending);
int hab_open_cancel_notify(struct hab_open_request *request);
int hab_open_receive_cancel(struct physical_channel *pchan,
		size_t sizebytes);

int hab_stat_init(struct hab_driver *drv);
int hab_stat_deinit(struct hab_driver *drv);
int hab_stat_show_vchan(struct hab_driver *drv, char *buf, int sz);
int hab_stat_show_ctx(struct hab_driver *drv, char *buf, int sz);
int hab_stat_show_expimp(struct hab_driver *drv, int pid, char *buf, int sz);
int hab_stat_show_reclaim(struct hab_driver *drv, char *buf, int sz);
int hab_stat_init_sub(struct hab_driver *drv);
int hab_stat_deinit_sub(struct hab_driver *drv);

static inline void hab_spin_lock(spinlock_t *lock, int irqs_disabled)
{
	if (irqs_disabled)
		spin_lock(lock);
	else
		spin_lock_bh(lock);
}

static inline void hab_spin_unlock(spinlock_t *lock, int irqs_disabled)
{
	if (irqs_disabled)
		spin_unlock(lock);
	else
		spin_unlock_bh(lock);
}

static inline void hab_write_lock(rwlock_t *lock, int no_touch_bh)
{
	if (no_touch_bh)
		write_lock(lock);
	else
		write_lock_bh(lock);
}

static inline void hab_write_unlock(rwlock_t *lock, int no_touch_bh)
{
	if (no_touch_bh)
		write_unlock(lock);
	else
		write_unlock_bh(lock);
}
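
/*
 * The wrappers above let common code take a lock with or without disabling
 * bottom halves, depending on the caller's context. Illustrative use (a
 * sketch; irqs_disabled would come from the pchan/OS layer):
 *
 *	hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
 *	... touch the rx buffer ...
 *	hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
 */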

/* Global singleton HAB instance */
extern struct hab_driver hab_driver;

int dump_hab_get_file_name(char *file_time, int ft_size);
int dump_hab_open(void);
void dump_hab_close(void);
int dump_hab_buf(void *buf, int size);
void hab_pipe_read_dump(struct physical_channel *pchan);
void dump_hab(int mmid);
void dump_hab_wq(struct physical_channel *pchan);
int hab_stat_log(struct physical_channel **pchans, int pchan_cnt, char *dest,
		int dest_size);
int hab_stat_buffer_print(char *dest,
		int dest_size, const char *fmt, ...);
int hab_create_cdev_node(int mmid_grp_index);

#endif /* __HAB_H */