/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef __SYNX_UTIL_H__
#define __SYNX_UTIL_H__

#include "synx_api.h"
#include "synx_private.h"

extern struct synx_device *synx_dev;

u32 __fence_state(struct dma_fence *fence, bool locked);
void synx_util_destroy_coredata(struct kref *kref);
extern void synx_fence_callback(struct dma_fence *fence,
	struct dma_fence_cb *cb);
extern int synx_native_signal_fence(struct synx_coredata *synx_obj,
	u32 status);
  17. static inline bool synx_util_is_valid_bind_type(u32 type)
  18. {
  19. if (type < SYNX_MAX_BIND_TYPES)
  20. return true;
  21. return false;
  22. }
  23. static inline bool synx_util_is_global_handle(u32 h_synx)
  24. {
  25. return (h_synx & SYNX_OBJ_GLOBAL_FLAG_MASK) ? true : false;
  26. }
  27. static inline u32 synx_util_get_object_type(
  28. struct synx_coredata *synx_obj)
  29. {
  30. return synx_obj ? synx_obj->type : 0;
  31. }
  32. static inline bool synx_util_is_merged_object(
  33. struct synx_coredata *synx_obj)
  34. {
  35. if (synx_obj &&
  36. (synx_obj->type & SYNX_CREATE_MERGED_FENCE))
  37. return true;
  38. return false;
  39. }
  40. static inline bool synx_util_is_global_object(
  41. struct synx_coredata *synx_obj)
  42. {
  43. if (synx_obj &&
  44. (synx_obj->type & SYNX_CREATE_GLOBAL_FENCE))
  45. return true;
  46. return false;
  47. }
  48. static inline bool synx_util_is_external_object(
  49. struct synx_coredata *synx_obj)
  50. {
  51. if (synx_obj &&
  52. !(synx_obj->type & SYNX_CREATE_MERGED_FENCE) &&
  53. (synx_obj->type & SYNX_CREATE_DMA_FENCE))
  54. return true;
  55. return false;
  56. }
  57. static inline u32 synx_util_map_params_to_type(u32 flags)
  58. {
  59. if (flags & SYNX_CREATE_CSL_FENCE)
  60. return SYNX_TYPE_CSL;
  61. return SYNX_MAX_BIND_TYPES;
  62. }
  63. static inline u32 synx_util_global_idx(u32 h_synx)
  64. {
  65. return (h_synx & SYNX_OBJ_HANDLE_MASK);
  66. }
/* coredata memory functions */
void synx_util_get_object(struct synx_coredata *synx_obj);
void synx_util_put_object(struct synx_coredata *synx_obj);
void synx_util_object_destroy(struct synx_coredata *synx_obj);
  71. static inline struct synx_coredata *synx_util_obtain_object(
  72. struct synx_handle_coredata *synx_data)
  73. {
  74. if (IS_ERR_OR_NULL(synx_data))
  75. return NULL;
  76. return synx_data->synx_obj;
  77. }
/* global/local map functions */
struct synx_map_entry *synx_util_insert_to_map(struct synx_coredata *synx_obj,
	u32 h_synx, u32 flags);
struct synx_map_entry *synx_util_get_map_entry(u32 h_synx);
void synx_util_release_map_entry(struct synx_map_entry *map_entry);
void synx_util_destroy_map_entry(struct kref *kref);

/* fence map functions */
int synx_util_insert_fence_entry(struct synx_fence_entry *entry, u32 *h_synx,
	u32 global);
u32 synx_util_get_fence_entry(u64 key, u32 global);
void synx_util_release_fence_entry(u64 key);

/* coredata initialize functions */
int synx_util_init_coredata(struct synx_coredata *synx_obj,
	struct synx_create_params *params,
	struct dma_fence_ops *ops,
	u64 dma_context);
int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
	struct dma_fence **fences,
	struct synx_merge_params *params,
	u32 num_objs,
	u64 dma_context);

/* handle related functions */
int synx_alloc_global_handle(u32 *new_synx);
int synx_alloc_local_handle(u32 *new_synx);
long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size);
int synx_util_init_handle(struct synx_client *client, struct synx_coredata *obj,
	u32 *new_h_synx,
	void *map_entry);
u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx);

/* callback related functions */
int synx_util_alloc_cb_entry(struct synx_client *client,
	struct synx_kernel_payload *data,
	u32 *cb_idx);
int synx_util_clear_cb_entry(struct synx_client *client,
	struct synx_client_cb *cb);
void synx_util_default_user_callback(u32 h_synx, int status, void *data);
void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 state);
void synx_util_cb_dispatch(struct work_struct *cb_dispatch);

/* external fence functions */
int synx_util_activate(struct synx_coredata *synx_obj);
int synx_util_add_callback(struct synx_coredata *synx_obj, u32 h_synx);

/* merge related helper functions */
s32 synx_util_merge_error(struct synx_client *client, u32 *h_synxs, u32 num_objs);
int synx_util_validate_merge(struct synx_client *client, u32 *h_synxs, u32 num_objs,
	struct dma_fence ***fences,
	u32 *fence_cnt);

/* coredata status functions */
u32 synx_util_get_object_status(struct synx_coredata *synx_obj);
u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj);

/* client handle map related functions */
struct synx_handle_coredata *synx_util_acquire_handle(struct synx_client *client,
	u32 h_synx);
void synx_util_release_handle(struct synx_handle_coredata *synx_data);
int synx_util_update_handle(struct synx_client *client, u32 h_synx, u32 sync_id,
	u32 type, struct synx_handle_coredata **handle);
void synx_client_destroy(struct kref *kref);
void synx_util_destroy_handle(struct kref *kref);

/* client memory handler functions */
struct synx_client *synx_get_client(struct synx_session *session);
void synx_put_client(struct synx_client *client);

/* error log functions */
void synx_util_generate_timestamp(char *timestamp, size_t size);
void synx_util_log_error(u32 id, u32 h_synx, s32 err);

/* external fence map functions */
int synx_util_save_data(void *fence, u32 flags, u32 data);
struct synx_entry_64 *synx_util_retrieve_data(void *fence, u32 type);
void synx_util_remove_data(void *fence, u32 type);

/* misc */
void synx_util_destroy_data(struct kref *kref);
void synx_util_map_import_params_to_create(
	struct synx_import_indv_params *params,
	struct synx_create_params *c_params);
struct bind_operations *synx_util_get_bind_ops(u32 type);
u32 synx_util_map_client_id_to_core(enum synx_client_id id);
int synx_get_child_coredata(struct synx_coredata *synx_obj, struct synx_coredata ***child_synx_obj, int *num_fences);

#endif /* __SYNX_UTIL_H__ */