|
#include <sys/utsname.h>
#include <sys/select.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>

/* Linux */
#include <linux/hidraw.h>
@@ -841,6 +842,11 @@ int HID_API_EXPORT hid_exit(void) |
841 | 842 | return 0; |
842 | 843 | } |
843 | 844 |
|
/* Check a device's vendor/product IDs against an expected pair.
   An expected ID of 0x0 acts as a wildcard and matches anything.
   Returns 1 on match, 0 otherwise. */
static int hid_internal_match_device_id(unsigned short vendor_id, unsigned short product_id, unsigned short expected_vendor_id, unsigned short expected_product_id)
{
	if (expected_vendor_id != 0x0 && vendor_id != expected_vendor_id) {
		return 0;
	}
	if (expected_product_id != 0x0 && product_id != expected_product_id) {
		return 0;
	}
	return 1;
}
| 849 | + |
844 | 850 | struct hid_device_info HID_API_EXPORT *hid_enumerate(unsigned short vendor_id, unsigned short product_id) |
845 | 851 | { |
846 | 852 | struct udev *udev; |
@@ -942,26 +948,239 @@ void HID_API_EXPORT hid_free_enumeration(struct hid_device_info *devs) |
942 | 948 | } |
943 | 949 | } |
944 | 950 |
|
/* One registered hotplug notification. Nodes form a singly linked list
   headed by hid_hotplug_context.hotplug_cbs. */
struct hid_hotplug_callback {
	/* Unique handle returned to the caller; used to deregister */
	hid_hotplug_callback_handle handle;
	/* Device filter; 0x0 acts as a wildcard (see hid_internal_match_device_id) */
	unsigned short vendor_id;
	unsigned short product_id;
	/* Bitmask of HID_API_HOTPLUG_EVENT_* values this callback wants */
	hid_hotplug_event events;
	/* Opaque pointer handed back to the callback on every invocation */
	void *user_data;
	hid_hotplug_callback_fn callback;

	/* Pointer to the next notification */
	struct hid_hotplug_callback *next;
};
| 962 | + |
| 963 | +static struct hid_hotplug_context { |
| 964 | + /* UDEV context that handles the monitor */ |
| 965 | + struct udev* udev_ctx; |
| 966 | + |
| 967 | + /* UDEV monitor that receives events */ |
| 968 | + struct udev_monitor* mon; |
| 969 | + |
| 970 | + /* File descriptor for the UDEV monitor that allows to check for new events with select() */ |
| 971 | + int monitor_fd; |
| 972 | + |
| 973 | + /* Thread for the UDEV monitor */ |
| 974 | + pthread_t* thread; |
| 975 | + |
| 976 | + pthread_mutex_t mutex; |
| 977 | + |
| 978 | + /* HIDAPI unique callback handle counter */ |
| 979 | + hid_hotplug_callback_handle next_handle; |
| 980 | + |
| 981 | + /* Linked list of the hotplug callbacks */ |
| 982 | + struct hid_hotplug_callback *hotplug_cbs; |
| 983 | + |
| 984 | + /* Linked list of the device infos (mandatory when the device is disconnected) */ |
| 985 | + struct hid_device_info *devs; |
| 986 | +} hid_hotplug_context = { |
| 987 | + .udev_ctx = NULL, |
| 988 | + .thread = NULL, |
| 989 | + .monitor_fd = -1, |
| 990 | + .next_handle = 1, |
| 991 | + .hotplug_cbs = NULL, |
| 992 | + .devs = NULL |
| 993 | +}; |
| 994 | + |
| 995 | +static void hid_internal_invoke_callbacks(struct hid_device_info* info, hid_hotplug_event event) |
| 996 | +{ |
| 997 | + struct hid_hotplug_callback **current = &hid_hotplug_context.hotplug_cbs; |
| 998 | + while(*current) { |
| 999 | + struct hid_hotplug_callback *callback = *current; |
| 1000 | + if((callback->events & event) && |
| 1001 | + hid_internal_match_device_id(info->vendor_id, info->product_id, callback->vendor_id, callback->product_id)) { |
| 1002 | + int result = callback->callback(callback->handle, info, event, callback->user_data); |
| 1003 | + /* If the result is non-zero, we remove the callback and proceed */ |
| 1004 | + /* Do not use the deregister call as it locks the mutex, and we are currently in a lock */ |
| 1005 | + if(result) { |
| 1006 | + struct hid_hotplug_callback *callback = *current; |
| 1007 | + *current = (*current)->next; |
| 1008 | + free(callback); |
| 1009 | + continue; |
| 1010 | + } |
| 1011 | + } |
| 1012 | + current = &callback->next; |
| 1013 | + } |
| 1014 | +} |
| 1015 | + |
| 1016 | +static void hid_internal_cleanup_hotplugs() |
| 1017 | +{ |
| 1018 | + udev_monitor_unref(hid_hotplug_context.mon); |
| 1019 | + udev_unref(hid_hotplug_context.udev_ctx); |
| 1020 | +} |
| 1021 | + |
| 1022 | +static int match_udev_to_info(struct udev_device* raw_dev, struct hid_device_info *info) { |
| 1023 | + const char* path = udev_device_get_devpath(raw_dev); |
| 1024 | + if(!strcmp(path, info->path)) { |
| 1025 | + return 1; |
| 1026 | + } |
| 1027 | + return 0; |
| 1028 | +} |
| 1029 | + |
| 1030 | +static void* hotplug_thread(void*) |
| 1031 | +{ |
| 1032 | + while(hid_hotplug_context.monitor_fd > 0) |
| 1033 | + { |
| 1034 | + fd_set fds; |
| 1035 | + struct timeval tv; |
| 1036 | + int ret; |
| 1037 | + |
| 1038 | + FD_ZERO(&fds); |
| 1039 | + FD_SET(hid_hotplug_context.monitor_fd, &fds); |
| 1040 | + /* 5 msec timeout seems reasonable; don't set too low to avoid high CPU usage */ |
| 1041 | + /* This timeout only affects how much time it takes to stop the thread */ |
| 1042 | + tv.tv_sec = 0; |
| 1043 | + tv.tv_usec = 5000; |
| 1044 | + |
| 1045 | + ret = select(hid_hotplug_context.monitor_fd+1, &fds, NULL, NULL, &tv); |
| 1046 | + |
| 1047 | + /* Check if our file descriptor has received data. */ |
| 1048 | + if (ret > 0 && FD_ISSET(hid_hotplug_context.monitor_fd, &fds)) { |
| 1049 | + |
| 1050 | + /* Make the call to receive the device. |
| 1051 | + select() ensured that this will not block. */ |
| 1052 | + struct udev_device *raw_dev = udev_monitor_receive_device(hid_hotplug_context.mon); |
| 1053 | + if (raw_dev) { |
| 1054 | + /* Lock the mutex so callback/device lists don't change elsewhere from here on */ |
| 1055 | + pthread_mutex_lock(&hid_hotplug_context.mutex); |
| 1056 | + |
| 1057 | + const char* action = udev_device_get_action(raw_dev); |
| 1058 | + if(!strcmp(action, "add")) { |
| 1059 | + // We create a list of all usages on this UDEV device |
| 1060 | + struct hid_device_info* info = create_device_info_for_device(raw_dev); |
| 1061 | + |
| 1062 | + hid_internal_invoke_callbacks(info, HID_API_HOTPLUG_EVENT_DEVICE_ARRIVED); |
| 1063 | + } |
| 1064 | + else if(!strcmp(action, "remove")) { |
| 1065 | + for (struct hid_device_info** current = &hid_hotplug_context.devs; *current; current = &(*current)->next) { |
| 1066 | + if (match_udev_to_info(raw_dev, *current)) { |
| 1067 | + /* If the libusb device that's left matches this HID device, we detach it from the list */ |
| 1068 | + struct hid_device_info* info = *current; |
| 1069 | + *current = (*current)->next; |
| 1070 | + info->next = NULL; |
| 1071 | + hid_internal_invoke_callbacks(info, HID_API_HOTPLUG_EVENT_DEVICE_LEFT); |
| 1072 | + /* Free every removed device */ |
| 1073 | + free(info); |
| 1074 | + } |
| 1075 | + } |
| 1076 | + } |
| 1077 | + udev_device_unref(raw_dev); |
| 1078 | + pthread_mutex_unlock(&hid_hotplug_context.mutex); |
| 1079 | + } |
| 1080 | + } |
| 1081 | + } |
| 1082 | + return NULL; |
| 1083 | +} |
| 1084 | + |
945 | 1085 | int HID_API_EXPORT HID_API_CALL hid_hotplug_register_callback(unsigned short vendor_id, unsigned short product_id, int events, int flags, hid_hotplug_callback_fn callback, void *user_data, hid_hotplug_callback_handle *callback_handle) |
946 | 1086 | { |
947 | | - /* Stub */ |
948 | | - (void)vendor_id; |
949 | | - (void)product_id; |
950 | | - (void)events; |
951 | | - (void)flags; |
952 | | - (void)callback; |
953 | | - (void)user_data; |
954 | | - (void)callback_handle; |
| 1087 | + struct hid_hotplug_callback* hotplug_cb; |
| 1088 | + |
| 1089 | + /* Check params */ |
| 1090 | + if (events == 0 |
| 1091 | + || (events & ~(HID_API_HOTPLUG_EVENT_DEVICE_ARRIVED | HID_API_HOTPLUG_EVENT_DEVICE_LEFT)) |
| 1092 | + || (flags & ~(HID_API_HOTPLUG_ENUMERATE)) |
| 1093 | + || callback == NULL) { |
| 1094 | + return -1; |
| 1095 | + } |
| 1096 | + |
| 1097 | + hotplug_cb = (struct hid_hotplug_callback*)calloc(1, sizeof(struct hid_hotplug_callback)); |
| 1098 | + |
| 1099 | + if (hotplug_cb == NULL) { |
| 1100 | + return -1; |
| 1101 | + } |
| 1102 | + |
| 1103 | + /* Fill out the record */ |
| 1104 | + hotplug_cb->next = NULL; |
| 1105 | + hotplug_cb->vendor_id = vendor_id; |
| 1106 | + hotplug_cb->product_id = product_id; |
| 1107 | + hotplug_cb->events = events; |
| 1108 | + hotplug_cb->user_data = user_data; |
| 1109 | + hotplug_cb->callback = callback; |
| 1110 | + |
| 1111 | + /* TODO: protect the handle by the context hotplug lock */ |
| 1112 | + hotplug_cb->handle = hid_hotplug_context.next_handle++; |
| 1113 | + |
| 1114 | + /* handle the unlikely case of handle overflow */ |
| 1115 | + if (hid_hotplug_context.next_handle < 0) |
| 1116 | + { |
| 1117 | + hid_hotplug_context.next_handle = 1; |
| 1118 | + } |
| 1119 | + |
| 1120 | + /* Return allocated handle */ |
| 1121 | + if (callback_handle != NULL) { |
| 1122 | + *callback_handle = hotplug_cb->handle; |
| 1123 | + } |
| 1124 | + |
| 1125 | + /* Append a new callback to the end */ |
| 1126 | + if (hid_hotplug_context.hotplug_cbs != NULL) { |
| 1127 | + struct hid_hotplug_callback *last = hid_hotplug_context.hotplug_cbs; |
| 1128 | + while (last->next != NULL) { |
| 1129 | + last = last->next; |
| 1130 | + } |
| 1131 | + last->next = hotplug_cb; |
| 1132 | + } |
| 1133 | + else { |
| 1134 | + // Prepare a UDEV context to run monitoring on |
| 1135 | + hid_hotplug_context.udev_ctx = udev_new(); |
| 1136 | + if(!hid_hotplug_context.udev_ctx) |
| 1137 | + { |
| 1138 | + return -1; |
| 1139 | + } |
| 1140 | + |
| 1141 | + hid_hotplug_context.mon = udev_monitor_new_from_netlink(hid_hotplug_context.udev_ctx, "udev"); |
| 1142 | + udev_monitor_filter_add_match_subsystem_devtype(hid_hotplug_context.mon, "hidraw", NULL); |
| 1143 | + udev_monitor_enable_receiving(hid_hotplug_context.mon); |
| 1144 | + hid_hotplug_context.monitor_fd = udev_monitor_get_fd(hid_hotplug_context.mon); |
| 1145 | + |
| 1146 | + /* After monitoring is all set up, enumerate all devices */ |
| 1147 | + hid_hotplug_context.devs = hid_enumerate(0, 0); |
| 1148 | + |
| 1149 | + /* Don't forget to actually register the callback */ |
| 1150 | + hid_hotplug_context.hotplug_cbs = hotplug_cb; |
| 1151 | + |
| 1152 | + /* Start the thread that will be doing the event scanning */ |
| 1153 | + pthread_create(&hid_hotplug_context.thread, NULL, &hotplug_thread, NULL); |
| 1154 | + } |
955 | 1155 |
|
956 | 1156 | return -1; |
957 | 1157 | } |
958 | 1158 |
|
959 | 1159 | int HID_API_EXPORT HID_API_CALL hid_hotplug_deregister_callback(hid_hotplug_callback_handle callback_handle) |
960 | 1160 | { |
961 | | - /* Stub */ |
962 | | - (void)callback_handle; |
| 1161 | + struct hid_hotplug_callback *hotplug_cb = NULL; |
963 | 1162 |
|
964 | | - return -1; |
| 1163 | + pthread_mutex_lock(&hid_hotplug_context.mutex); |
| 1164 | + |
| 1165 | + if (hid_hotplug_context.hotplug_cbs == NULL) { |
| 1166 | + pthread_mutex_unlock(&hid_hotplug_context.mutex); |
| 1167 | + return -1; |
| 1168 | + } |
| 1169 | + |
| 1170 | + /* Remove this notification */ |
| 1171 | + for (struct hid_hotplug_callback **current = &hid_hotplug_context.hotplug_cbs; *current != NULL; current = &(*current)->next) { |
| 1172 | + if ((*current)->handle == callback_handle) { |
| 1173 | + struct hid_hotplug_callback *next = (*current)->next; |
| 1174 | + hotplug_cb = *current; |
| 1175 | + *current = next; |
| 1176 | + free(hotplug_cb); |
| 1177 | + break; |
| 1178 | + } |
| 1179 | + } |
| 1180 | + |
| 1181 | + hid_internal_cleanup_hotplugs(); |
| 1182 | + |
| 1183 | + pthread_mutex_unlock(&hid_hotplug_context.mutex); |
965 | 1184 | } |
966 | 1185 |
|
967 | 1186 | hid_device * hid_open(unsigned short vendor_id, unsigned short product_id, const wchar_t *serial_number) |
|
0 commit comments