hv_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <[email protected]>
 *   Hank Janssen  <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

#define SD_MAJOR	3
#define SD_MINOR	0
#define SD_MINOR_1	1
#define SD_MINOR_2	2
#define SD_VERSION_3_1	(SD_MAJOR << 16 | SD_MINOR_1)
#define SD_VERSION_3_2	(SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)

#define SD_MAJOR_1	1
#define SD_VERSION_1	(SD_MAJOR_1 << 16 | SD_MINOR)

#define TS_MAJOR	4
#define TS_MINOR	0
#define TS_VERSION	(TS_MAJOR << 16 | TS_MINOR)

#define TS_MAJOR_1	1
#define TS_VERSION_1	(TS_MAJOR_1 << 16 | TS_MINOR)

#define TS_MAJOR_3	3
#define TS_VERSION_3	(TS_MAJOR_3 << 16 | TS_MINOR)

#define HB_MAJOR	3
#define HB_MINOR	0
#define HB_VERSION	(HB_MAJOR << 16 | HB_MINOR)

#define HB_MAJOR_1	1
#define HB_VERSION_1	(HB_MAJOR_1 << 16 | HB_MINOR)
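
/*
 * Negotiated IC protocol versions for the Shutdown (SD), TimeSync (TS)
 * and Heartbeat (HB) services, encoded as (major << 16) | minor.
 */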
static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;
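
/*
 * Framework and service version tables offered to the host during
 * ICMSGTYPE_NEGOTIATE. Entries are ordered newest-first, so the
 * negotiation picks the first (highest) version the host also supports.
 */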
#define SD_VER_COUNT 4
static const int sd_versions[] = {
	SD_VERSION_3_2,
	SD_VERSION_3_1,
	SD_VERSION,
	SD_VERSION_1
};

#define TS_VER_COUNT 3
static const int ts_versions[] = {
	TS_VERSION,
	TS_VERSION_3,
	TS_VERSION_1
};

#define HB_VER_COUNT 2
static const int hb_versions[] = {
	HB_VERSION,
	HB_VERSION_1
};

#define FW_VER_COUNT 2
static const int fw_versions[] = {
	UTIL_FW_VERSION,
	UTIL_WS2K8_FW_VERSION
};

/*
 * Send the "hibernate" udev event in a thread context.
 */
struct hibernate_work_context {
	struct work_struct work;
	struct hv_device *dev;
};

static struct hibernate_work_context hibernate_context;
static bool hibernation_supported;

static void send_hibernate_uevent(struct work_struct *work)
{
	char *uevent_env[2] = { "EVENT=hibernate", NULL };
	struct hibernate_work_context *ctx;

	ctx = container_of(work, struct hibernate_work_context, work);
	kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);

	pr_info("Sent hibernation uevent\n");
}

static int hv_shutdown_init(struct hv_util_service *srv)
{
	struct vmbus_channel *channel = srv->channel;

	INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
	hibernate_context.dev = channel->device_obj;

	hibernation_supported = hv_is_hibernation_supported();

	return 0;
}

static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
	.util_cb = shutdown_onchannelcallback,
	.util_init = hv_shutdown_init,
};

static int hv_timesync_init(struct hv_util_service *srv);
static int hv_timesync_pre_suspend(void);
static void hv_timesync_deinit(void);

static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
	.util_cb = timesync_onchannelcallback,
	.util_init = hv_timesync_init,
	.util_pre_suspend = hv_timesync_pre_suspend,
	.util_deinit = hv_timesync_deinit,
};

static void heartbeat_onchannelcallback(void *context);
static struct hv_util_service util_heartbeat = {
	.util_cb = heartbeat_onchannelcallback,
};

static struct hv_util_service util_kvp = {
	.util_cb = hv_kvp_onchannelcallback,
	.util_init = hv_kvp_init,
	.util_pre_suspend = hv_kvp_pre_suspend,
	.util_pre_resume = hv_kvp_pre_resume,
	.util_deinit = hv_kvp_deinit,
};

static struct hv_util_service util_vss = {
	.util_cb = hv_vss_onchannelcallback,
	.util_init = hv_vss_init,
	.util_pre_suspend = hv_vss_pre_suspend,
	.util_pre_resume = hv_vss_pre_resume,
	.util_deinit = hv_vss_deinit,
};

static struct hv_util_service util_fcopy = {
	.util_cb = hv_fcopy_onchannelcallback,
	.util_init = hv_fcopy_init,
	.util_pre_suspend = hv_fcopy_pre_suspend,
	.util_pre_resume = hv_fcopy_pre_resume,
	.util_deinit = hv_fcopy_deinit,
};

static void perform_shutdown(struct work_struct *dummy)
{
	orderly_poweroff(true);
}

static void perform_restart(struct work_struct *dummy)
{
	orderly_reboot();
}

/*
 * Perform the shutdown operation in a thread context.
 */
static DECLARE_WORK(shutdown_work, perform_shutdown);

/*
 * Perform the restart operation in a thread context.
 */
static DECLARE_WORK(restart_work, perform_restart);

static void shutdown_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	struct work_struct *work = NULL;
	u32 recvlen;
	u64 requestid;
	u8 *shut_txf_buf = util_shutdown.recv_buffer;
	struct shutdown_msg_data *shutdown_msg;
	struct icmsg_hdr *icmsghdrp;

	if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
		pr_err_ratelimited("Shutdown request received. Could not read into shut txf buf\n");
		return;
	}

	if (!recvlen)
		return;

	/* Ensure recvlen is big enough to read header data */
	if (recvlen < ICMSG_HDR) {
		pr_err_ratelimited("Shutdown request received. Packet length too small: %d\n",
				   recvlen);
		return;
	}

	icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[sizeof(struct vmbuspipe_hdr)];

	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
		if (vmbus_prep_negotiate_resp(icmsghdrp,
					      shut_txf_buf, recvlen,
					      fw_versions, FW_VER_COUNT,
					      sd_versions, SD_VER_COUNT,
					      NULL, &sd_srv_version)) {
			pr_info("Shutdown IC version %d.%d\n",
				sd_srv_version >> 16,
				sd_srv_version & 0xFFFF);
		}
	} else if (icmsghdrp->icmsgtype == ICMSGTYPE_SHUTDOWN) {
		/* Ensure recvlen is big enough to contain shutdown_msg_data struct */
		if (recvlen < ICMSG_HDR + sizeof(struct shutdown_msg_data)) {
			pr_err_ratelimited("Invalid shutdown msg data. Packet length too small: %u\n",
					   recvlen);
			return;
		}

		shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ICMSG_HDR];

		/*
		 * shutdown_msg->flags can be 0 (shut down), 2 (reboot),
		 * or 4 (hibernate). It may bitwise-OR 1, which means
		 * performing the request by force. Linux always tries
		 * to perform the request by force.
		 */
		switch (shutdown_msg->flags) {
		case 0:
		case 1:
			icmsghdrp->status = HV_S_OK;
			work = &shutdown_work;
			pr_info("Shutdown request received - graceful shutdown initiated\n");
			break;
		case 2:
		case 3:
			icmsghdrp->status = HV_S_OK;
			work = &restart_work;
			pr_info("Restart request received - graceful restart initiated\n");
			break;
		case 4:
		case 5:
			pr_info("Hibernation request received\n");
			icmsghdrp->status = hibernation_supported ?
				HV_S_OK : HV_E_FAIL;
			if (hibernation_supported)
				work = &hibernate_context.work;
			break;
		default:
			icmsghdrp->status = HV_E_FAIL;
			pr_info("Shutdown request received - Invalid request\n");
			break;
		}
	} else {
		icmsghdrp->status = HV_E_FAIL;
		pr_err_ratelimited("Shutdown request received. Invalid msg type: %d\n",
				   icmsghdrp->icmsgtype);
	}

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
		| ICMSGHDRFLAG_RESPONSE;

	vmbus_sendpacket(channel, shut_txf_buf,
			 recvlen, requestid,
			 VM_PKT_DATA_INBAND, 0);

	if (work)
		schedule_work(work);
}

/*
 * Set the host time in a process context.
 */
static struct work_struct adj_time_work;

/*
 * The last time sample, received from the host. PTP device responds to
 * requests by using this data and the current partition-wide time reference
 * count.
 */
static struct {
	u64		host_time;
	u64		ref_time;
	spinlock_t	lock;
} host_ts;
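
/*
 * Hyper-V reference time is a 100ns-unit count based on the Windows
 * epoch (1601); subtracting WLTIMEDELTA re-bases it to the Unix epoch
 * before the conversion to nanoseconds below.
 */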
static inline u64 reftime_to_ns(u64 reftime)
{
	return (reftime - WLTIMEDELTA) * 100;
}

/*
 * Hard coded threshold for host timesync delay: 600 seconds
 */
static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;

static int hv_get_adj_host_time(struct timespec64 *ts)
{
	u64 newtime, reftime, timediff_adj;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host_ts.lock, flags);
	reftime = hv_read_reference_counter();

	/*
	 * We need to let the caller know that last update from host
	 * is older than the max allowable threshold. clock_gettime()
	 * and PTP ioctl do not have a documented error that we could
	 * return for this specific case. Use ESTALE to report this.
	 */
	timediff_adj = reftime - host_ts.ref_time;
	if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
		pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
			     (timediff_adj * 100));
		ret = -ESTALE;
	}

	newtime = host_ts.host_time + timediff_adj;
	*ts = ns_to_timespec64(reftime_to_ns(newtime));
	spin_unlock_irqrestore(&host_ts.lock, flags);

	return ret;
}

static void hv_set_host_time(struct work_struct *work)
{
	struct timespec64 ts;

	if (!hv_get_adj_host_time(&ts))
		do_settimeofday64(&ts);
}

/*
 * Synchronize time with host after reboot, restore, etc.
 *
 * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
 * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
 * message after the timesync channel is opened. Since the hv_utils module is
 * loaded after hv_vmbus, the first message is usually missed. This bit is
 * considered a hard request to discipline the clock.
 *
 * ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host. This is
 * typically used as a hint to the guest. The guest is under no obligation
 * to discipline the clock.
 */
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
	unsigned long flags;
	u64 cur_reftime;

	/*
	 * Save the adjusted time sample from the host and the snapshot
	 * of the current system time.
	 */
	spin_lock_irqsave(&host_ts.lock, flags);

	cur_reftime = hv_read_reference_counter();
	host_ts.host_time = hosttime;
	host_ts.ref_time = cur_reftime;

	/*
	 * TimeSync v4 messages contain reference time (guest's Hyper-V
	 * clocksource read when the time sample was generated), we can
	 * improve the precision by adding the delta between now and the
	 * time of generation. For older protocols we set
	 * reftime == cur_reftime on call.
	 */
	host_ts.host_time += (cur_reftime - reftime);

	spin_unlock_irqrestore(&host_ts.lock, flags);

	/* Schedule work to do do_settimeofday64() */
	if (adj_flags & ICTIMESYNCFLAG_SYNC)
		schedule_work(&adj_time_work);
}

/*
 * Time Sync Channel message handler.
 */
static void timesync_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct ictimesync_data *timedatap;
	struct ictimesync_ref_data *refdata;
	u8 *time_txf_buf = util_timesynch.recv_buffer;

	/*
	 * Drain the ring buffer and use the last packet to update
	 * host_ts
	 */
	while (1) {
		int ret = vmbus_recvpacket(channel, time_txf_buf,
					   HV_HYP_PAGE_SIZE, &recvlen,
					   &requestid);
		if (ret) {
			pr_err_ratelimited("TimeSync IC pkt recv failed (Err: %d)\n",
					   ret);
			break;
		}

		if (!recvlen)
			break;

		/* Ensure recvlen is big enough to read header data */
		if (recvlen < ICMSG_HDR) {
			pr_err_ratelimited("Timesync request received. Packet length too small: %d\n",
					   recvlen);
			break;
		}

		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
				sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp,
						time_txf_buf, recvlen,
						fw_versions, FW_VER_COUNT,
						ts_versions, TS_VER_COUNT,
						NULL, &ts_srv_version)) {
				pr_info("TimeSync IC version %d.%d\n",
					ts_srv_version >> 16,
					ts_srv_version & 0xFFFF);
			}
		} else if (icmsghdrp->icmsgtype == ICMSGTYPE_TIMESYNC) {
			if (ts_srv_version > TS_VERSION_3) {
				/* Ensure recvlen is big enough to read ictimesync_ref_data */
				if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_ref_data)) {
					pr_err_ratelimited("Invalid ictimesync ref data. Length too small: %u\n",
							   recvlen);
					break;
				}
				refdata = (struct ictimesync_ref_data *)&time_txf_buf[ICMSG_HDR];

				adj_guesttime(refdata->parenttime,
					      refdata->vmreferencetime,
					      refdata->flags);
			} else {
				/* Ensure recvlen is big enough to read ictimesync_data */
				if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_data)) {
					pr_err_ratelimited("Invalid ictimesync data. Length too small: %u\n",
							   recvlen);
					break;
				}
				timedatap = (struct ictimesync_data *)&time_txf_buf[ICMSG_HDR];

				adj_guesttime(timedatap->parenttime,
					      hv_read_reference_counter(),
					      timedatap->flags);
			}
		} else {
			icmsghdrp->status = HV_E_FAIL;
			pr_err_ratelimited("Timesync request received. Invalid msg type: %d\n",
					   icmsghdrp->icmsgtype);
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, time_txf_buf,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}
}

/*
 * Heartbeat functionality.
 * Every two seconds, Hyper-V sends us a heartbeat request message.
 * We respond to this message, and Hyper-V knows we are alive.
 */
static void heartbeat_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct heartbeat_msg_data *heartbeat_msg;
	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;

	while (1) {
		if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
				     &recvlen, &requestid)) {
			pr_err_ratelimited("Heartbeat request received. Could not read into hbeat txf buf\n");
			return;
		}

		if (!recvlen)
			break;

		/* Ensure recvlen is big enough to read header data */
		if (recvlen < ICMSG_HDR) {
			pr_err_ratelimited("Heartbeat request received. Packet length too small: %d\n",
					   recvlen);
			break;
		}

		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
				sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp,
					hbeat_txf_buf, recvlen,
					fw_versions, FW_VER_COUNT,
					hb_versions, HB_VER_COUNT,
					NULL, &hb_srv_version)) {
				pr_info("Heartbeat IC version %d.%d\n",
					hb_srv_version >> 16,
					hb_srv_version & 0xFFFF);
			}
		} else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
			/*
			 * Ensure recvlen is big enough to read seq_num. Reserved area is not
			 * included in the check as the host may not fill it up entirely
			 */
			if (recvlen < ICMSG_HDR + sizeof(u64)) {
				pr_err_ratelimited("Invalid heartbeat msg data. Length too small: %u\n",
						   recvlen);
				break;
			}
			heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ICMSG_HDR];

			heartbeat_msg->seq_num += 1;
		} else {
			icmsghdrp->status = HV_E_FAIL;
			pr_err_ratelimited("Heartbeat request received. Invalid msg type: %d\n",
					   icmsghdrp->icmsgtype);
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, hbeat_txf_buf,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}
}

#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
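/*
 * The util channels carry small, infrequent messages, so each ring is
 * sized to just 3 pages of payload; VMBUS_RING_SIZE() adds room for the
 * ring buffer header.
 */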

static int util_probe(struct hv_device *dev,
		      const struct hv_vmbus_device_id *dev_id)
{
	struct hv_util_service *srv =
		(struct hv_util_service *)dev_id->driver_data;
	int ret;

	srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
	if (!srv->recv_buffer)
		return -ENOMEM;
	srv->channel = dev->channel;
	if (srv->util_init) {
		ret = srv->util_init(srv);
		if (ret) {
			ret = -ENODEV;
			goto error1;
		}
	}

	/*
	 * The set of services managed by the util driver are not performance
	 * critical and do not need batched reading. Furthermore, some services
	 * such as KVP can only handle one message from the host at a time.
	 * Turn off batched reading for all util drivers before we open the
	 * channel.
	 */
	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);

	hv_set_drvdata(dev, srv);

	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	if (ret)
		goto error;

	return 0;

error:
	if (srv->util_deinit)
		srv->util_deinit();
error1:
	kfree(srv->recv_buffer);
	return ret;
}

static int util_remove(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);

	if (srv->util_deinit)
		srv->util_deinit();
	vmbus_close(dev->channel);
	kfree(srv->recv_buffer);

	return 0;
}

/*
 * When we're in util_suspend(), all the userspace processes have been frozen
 * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
 * after the whole resume procedure, including util_resume(), finishes.
 */
static int util_suspend(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret = 0;

	if (srv->util_pre_suspend) {
		ret = srv->util_pre_suspend();
		if (ret)
			return ret;
	}

	vmbus_close(dev->channel);

	return 0;
}

static int util_resume(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret = 0;

	if (srv->util_pre_resume) {
		ret = srv->util_pre_resume();
		if (ret)
			return ret;
	}

	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	return ret;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Shutdown guid */
	{ HV_SHUTDOWN_GUID,
	  .driver_data = (unsigned long)&util_shutdown
	},
	/* Time synch guid */
	{ HV_TS_GUID,
	  .driver_data = (unsigned long)&util_timesynch
	},
	/* Heartbeat guid */
	{ HV_HEART_BEAT_GUID,
	  .driver_data = (unsigned long)&util_heartbeat
	},
	/* KVP guid */
	{ HV_KVP_GUID,
	  .driver_data = (unsigned long)&util_kvp
	},
	/* VSS GUID */
	{ HV_VSS_GUID,
	  .driver_data = (unsigned long)&util_vss
	},
	/* File copy GUID */
	{ HV_FCOPY_GUID,
	  .driver_data = (unsigned long)&util_fcopy
	},
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver util_drv = {
	.name = "hv_utils",
	.id_table = id_table,
	.probe = util_probe,
	.remove = util_remove,
	.suspend = util_suspend,
	.resume = util_resume,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int hv_ptp_enable(struct ptp_clock_info *info,
			 struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	return hv_get_adj_host_time(ts);
}
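
/*
 * The Hyper-V PTP clock is read-only from the guest's point of view:
 * only gettime64 is backed by the host time samples, while the set and
 * adjust operations above are rejected with -EOPNOTSUPP.
 */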
static struct ptp_clock_info ptp_hyperv_info = {
	.name		= "hyperv",
	.enable		= hv_ptp_enable,
	.adjtime	= hv_ptp_adjtime,
	.adjfreq	= hv_ptp_adjfreq,
	.gettime64	= hv_ptp_gettime,
	.settime64	= hv_ptp_settime,
	.owner		= THIS_MODULE,
};

static struct ptp_clock *hv_ptp_clock;

static int hv_timesync_init(struct hv_util_service *srv)
{
	spin_lock_init(&host_ts.lock);

	INIT_WORK(&adj_time_work, hv_set_host_time);

	/*
	 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
	 * disabled but the driver is still useful without the PTP device
	 * as it still handles the ICTIMESYNCFLAG_SYNC case.
	 */
	hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
	if (IS_ERR_OR_NULL(hv_ptp_clock)) {
		pr_err("cannot register PTP clock: %d\n",
		       PTR_ERR_OR_ZERO(hv_ptp_clock));
		hv_ptp_clock = NULL;
	}

	return 0;
}

static void hv_timesync_cancel_work(void)
{
	cancel_work_sync(&adj_time_work);
}

static int hv_timesync_pre_suspend(void)
{
	hv_timesync_cancel_work();
	return 0;
}

static void hv_timesync_deinit(void)
{
	if (hv_ptp_clock)
		ptp_clock_unregister(hv_ptp_clock);

	hv_timesync_cancel_work();
}

static int __init init_hyperv_utils(void)
{
	pr_info("Registering HyperV Utility Driver\n");

	return vmbus_driver_register(&util_drv);
}

static void exit_hyperv_utils(void)
{
	pr_info("De-Registered HyperV Utility Driver\n");

	vmbus_driver_unregister(&util_drv);
}

module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);

MODULE_DESCRIPTION("Hyper-V Utilities");
MODULE_LICENSE("GPL");