Merge tag 'char-misc-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big set of new char/misc driver drivers and features for
  4.12-rc1.

  There's lots of new drivers added this time around, new firmware
  drivers from Google, more auxdisplay drivers, extcon drivers, fpga
  drivers, and a bunch of other driver updates. Nothing major, except if
  you happen to have the hardware for these drivers, and then you will
  be happy :)

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (136 commits)
  firmware: google memconsole: Fix return value check in platform_memconsole_init()
  firmware: Google VPD: Fix return value check in vpd_platform_init()
  goldfish_pipe: fix build warning about using too much stack.
  goldfish_pipe: An implementation of more parallel pipe
  fpga fr br: update supported version numbers
  fpga: region: release FPGA region reference in error path
  fpga altera-hps2fpga: disable/unprepare clock on error in alt_fpga_bridge_probe()
  mei: drop the TODO from samples
  firmware: Google VPD sysfs driver
  firmware: Google VPD: import lib_vpd source files
  misc: lkdtm: Add volatile to intentional NULL pointer reference
  eeprom: idt_89hpesx: Add OF device ID table
  misc: ds1682: Add OF device ID table
  misc: tsl2550: Add OF device ID table
  w1: Remove unneeded use of assert() and remove w1_log.h
  w1: Use kernel common min() implementation
  uio_mf624: Align memory regions to page size and set correct offsets
  uio_mf624: Refactor memory info initialization
  uio: Allow handling of non page-aligned memory regions
  hangcheck-timer: Fix typo in comment
  ...
@@ -333,7 +333,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit
			 * This is governed by the guest/host protocol and
-			 * so the hypervisor gurantees that this is ok.
+			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = slow_virt_to_phys(
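The hunk above fills one page-frame number (PFN) per page of the GPADL buffer; the GPADL handle itself is a u32 while addresses are 64-bit, as the corrected comment notes. Below is a minimal userspace sketch of the per-page arithmetic only; PAGE_SHIFT, the fake physical base address, and the buffer size are illustrative stand-ins, not the kernel's slow_virt_to_phys() path.

/* Simplified model: describe a buffer as a list of page-frame numbers,
 * one entry per PAGE_SIZE chunk. The real driver translates kernel
 * virtual addresses with slow_virt_to_phys(); here a made-up physical
 * base address is used just to show the shift arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t fake_phys_base = 0x12345000;     /* pretend physical address of the buffer */
	uint32_t size = 3 * PAGE_SIZE;            /* page-aligned buffer length */
	uint32_t pagecount = size >> PAGE_SHIFT;
	uint64_t pfn[16];

	for (uint32_t i = 0; i < pagecount; i++)
		pfn[i] = (fake_phys_base + (uint64_t)i * PAGE_SIZE) >> PAGE_SHIFT;

	for (uint32_t i = 0; i < pagecount; i++)
		printf("pfn[%u] = 0x%llx\n", i, (unsigned long long)pfn[i]);
	return 0;
}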
@@ -380,7 +380,7 @@ nomem:
 }
 
 /*
- * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
  *
  * @channel: a channel
  * @kbuffer: from kmalloc or vmalloc
@@ -731,7 +731,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	/* Setup the descriptor */
 	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
 	desc.flags = flags;
-	desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
+	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
 	desc.length8 = (u16)(packetlen_aligned >> 3);
 	desc.transactionid = requestid;
 	desc.rangecount = pagecount;
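The corrected comment refers to the VMBus packet header convention of expressing the data offset and total length in 8-byte units, which is exactly what the ">> 3" computes. A small standalone sketch of that unit conversion follows; the descriptor and payload sizes are made-up example values.

/* Sketch: offsets/lengths carried in a packet header in 8-byte units. */
#include <stdint.h>
#include <stdio.h>

#define ALIGN8(x) (((x) + 7u) & ~7u)

int main(void)
{
	uint32_t descsize = 24;                   /* header + ranges, example value (multiple of 8) */
	uint32_t payload = 100;                   /* payload bytes, example value */
	uint32_t packetlen_aligned = ALIGN8(descsize + payload);

	uint16_t dataoffset8 = descsize >> 3;              /* where payload starts, in 8-byte units */
	uint16_t length8 = (uint16_t)(packetlen_aligned >> 3); /* total length, in 8-byte units */

	printf("dataoffset8=%u (=%u bytes), length8=%u (=%u bytes)\n",
	       dataoffset8, dataoffset8 * 8, length8, length8 * 8);
	return 0;
}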
@@ -792,7 +792,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	/* Setup the descriptor */
 	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
 	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
-	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes grandularity */
+	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
 	desc->length8 = (u16)(packetlen_aligned >> 3);
 	desc->transactionid = requestid;
 	desc->rangecount = 1;
@@ -842,7 +842,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	/* Setup the descriptor */
 	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
 	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
-	desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
+	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
 	desc.length8 = (u16)(packetlen_aligned >> 3);
 	desc.transactionid = requestid;
 	desc.rangecount = 1;
@@ -1080,30 +1080,30 @@ static void vmbus_onversion_response(
 }
 
 /* Channel message dispatch table */
-struct vmbus_channel_message_table_entry
-	channel_message_table[CHANNELMSG_COUNT] = {
-	{CHANNELMSG_INVALID, 0, NULL},
-	{CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer},
-	{CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind},
-	{CHANNELMSG_REQUESTOFFERS, 0, NULL},
-	{CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered},
-	{CHANNELMSG_OPENCHANNEL, 0, NULL},
-	{CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result},
-	{CHANNELMSG_CLOSECHANNEL, 0, NULL},
-	{CHANNELMSG_GPADL_HEADER, 0, NULL},
-	{CHANNELMSG_GPADL_BODY, 0, NULL},
-	{CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created},
-	{CHANNELMSG_GPADL_TEARDOWN, 0, NULL},
-	{CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown},
-	{CHANNELMSG_RELID_RELEASED, 0, NULL},
-	{CHANNELMSG_INITIATE_CONTACT, 0, NULL},
-	{CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
-	{CHANNELMSG_UNLOAD, 0, NULL},
-	{CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response},
-	{CHANNELMSG_18, 0, NULL},
-	{CHANNELMSG_19, 0, NULL},
-	{CHANNELMSG_20, 0, NULL},
-	{CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL},
+const struct vmbus_channel_message_table_entry
+channel_message_table[CHANNELMSG_COUNT] = {
+	{ CHANNELMSG_INVALID, 0, NULL },
+	{ CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer },
+	{ CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind },
+	{ CHANNELMSG_REQUESTOFFERS, 0, NULL },
+	{ CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered },
+	{ CHANNELMSG_OPENCHANNEL, 0, NULL },
+	{ CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result },
+	{ CHANNELMSG_CLOSECHANNEL, 0, NULL },
+	{ CHANNELMSG_GPADL_HEADER, 0, NULL },
+	{ CHANNELMSG_GPADL_BODY, 0, NULL },
+	{ CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created },
+	{ CHANNELMSG_GPADL_TEARDOWN, 0, NULL },
+	{ CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown },
+	{ CHANNELMSG_RELID_RELEASED, 0, NULL },
+	{ CHANNELMSG_INITIATE_CONTACT, 0, NULL },
+	{ CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response },
+	{ CHANNELMSG_UNLOAD, 0, NULL },
+	{ CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response },
+	{ CHANNELMSG_18, 0, NULL },
+	{ CHANNELMSG_19, 0, NULL },
+	{ CHANNELMSG_20, 0, NULL },
+	{ CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
 };
 
 /*
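The now-const channel_message_table is indexed by message type and pairs each type with a handler (or NULL for types handled elsewhere). Below is a hedged userspace model of the same lookup shape; the enum values, entry layout, and handler names are invented for illustration and are not the kernel's.

/* Sketch of a const dispatch table indexed by message type. */
#include <stdio.h>

enum msg_type { MSG_INVALID, MSG_OFFER, MSG_RESCIND, MSG_COUNT };

struct msg_header { enum msg_type msgtype; };

struct msg_table_entry {
	enum msg_type message_type;
	int handler_type;                         /* 0: handle inline, 1: defer to a work item */
	void (*message_handler)(struct msg_header *hdr);
};

static void on_offer(struct msg_header *hdr)   { (void)hdr; printf("offer handled\n"); }
static void on_rescind(struct msg_header *hdr) { (void)hdr; printf("rescind handled\n"); }

static const struct msg_table_entry msg_table[MSG_COUNT] = {
	{ MSG_INVALID, 0, NULL },
	{ MSG_OFFER,   0, on_offer },
	{ MSG_RESCIND, 0, on_rescind },
};

static void dispatch(struct msg_header *hdr)
{
	if (hdr->msgtype >= MSG_COUNT)
		return;                           /* unknown type: ignored in this sketch */
	const struct msg_table_entry *entry = &msg_table[hdr->msgtype];
	if (entry->message_handler)
		entry->message_handler(hdr);
}

int main(void)
{
	struct msg_header h = { MSG_OFFER };
	dispatch(&h);
	return 0;
}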
@@ -296,44 +296,47 @@ struct vmbus_channel *relid2channel(u32 relid)
 
 /*
  * vmbus_on_event - Process a channel event notification
+ *
+ * For batched channels (default) optimize host to guest signaling
+ * by ensuring:
+ * 1. While reading the channel, we disable interrupts from host.
+ * 2. Ensure that we process all posted messages from the host
+ *    before returning from this callback.
+ * 3. Once we return, enable signaling from the host. Once this
+ *    state is set we check to see if additional packets are
+ *    available to read. In this case we repeat the process.
+ * If this tasklet has been running for a long time
+ * then reschedule ourselves.
  */
 void vmbus_on_event(unsigned long data)
 {
 	struct vmbus_channel *channel = (void *) data;
-	void (*callback_fn)(void *);
+	unsigned long time_limit = jiffies + 2;
 
-	/*
-	 * A channel once created is persistent even when there
-	 * is no driver handling the device. An unloading driver
-	 * sets the onchannel_callback to NULL on the same CPU
-	 * as where this interrupt is handled (in an interrupt context).
-	 * Thus, checking and invoking the driver specific callback takes
-	 * care of orderly unloading of the driver.
-	 */
-	callback_fn = READ_ONCE(channel->onchannel_callback);
-	if (unlikely(callback_fn == NULL))
-		return;
+	do {
+		void (*callback_fn)(void *);
 
-	(*callback_fn)(channel->channel_callback_context);
-
-	if (channel->callback_mode == HV_CALL_BATCHED) {
-		/*
-		 * This callback reads the messages sent by the host.
-		 * We can optimize host to guest signaling by ensuring:
-		 * 1. While reading the channel, we disable interrupts from
-		 *    host.
-		 * 2. Ensure that we process all posted messages from the host
-		 *    before returning from this callback.
-		 * 3. Once we return, enable signaling from the host. Once this
-		 *    state is set we check to see if additional packets are
-		 *    available to read. In this case we repeat the process.
+		/* A channel once created is persistent even when
+		 * there is no driver handling the device. An
+		 * unloading driver sets the onchannel_callback to NULL.
 		 */
-		if (hv_end_read(&channel->inbound) != 0) {
-			hv_begin_read(&channel->inbound);
+		callback_fn = READ_ONCE(channel->onchannel_callback);
+		if (unlikely(callback_fn == NULL))
+			return;
 
-			tasklet_schedule(&channel->callback_event);
-		}
-	}
+		(*callback_fn)(channel->channel_callback_context);
+
+		if (channel->callback_mode != HV_CALL_BATCHED)
+			return;
+
+		if (likely(hv_end_read(&channel->inbound) == 0))
+			return;
+
+		hv_begin_read(&channel->inbound);
+	} while (likely(time_before(jiffies, time_limit)));
+
+	/* The time limit (2 jiffies) has been reached */
+	tasklet_schedule(&channel->callback_event);
 }
 
 /*
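The reworked vmbus_on_event() drains the channel in a loop, re-enables host signaling, re-checks for late arrivals, and reschedules itself once a small time budget (2 jiffies) is spent. Here is a simplified userspace sketch of that drain / re-check / budget pattern; the stand-in functions and the plain counter replace hv_end_read()/hv_begin_read() and jiffies and are not the kernel API.

/* Sketch: process work, re-enable "interrupts", re-check, bounded by a budget. */
#include <stdio.h>

static int pending = 3;                       /* pretend number of queued messages */

static void process_one(void) { pending--; printf("processed, %d left\n", pending); }
static int  end_read(void)    { return pending; }   /* unmask and report leftovers */
static void begin_read(void)  { }                   /* mask again before reading more */
static void reschedule(void)  { printf("rescheduling for later\n"); }

int main(void)
{
	int budget = 2;                           /* analogous to the 2-jiffy time limit */

	do {
		process_one();
		if (end_read() == 0)
			return 0;                 /* nothing arrived while we were busy */
		begin_read();
	} while (budget-- > 0);

	reschedule();                             /* budget exhausted: defer the rest */
	return 0;
}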
@@ -254,7 +254,10 @@ int hv_synic_init(unsigned int cpu)
 	shared_sint.as_uint64 = 0;
 	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
 	shared_sint.masked = false;
-	shared_sint.auto_eoi = true;
+	if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
+		shared_sint.auto_eoi = false;
+	else
+		shared_sint.auto_eoi = true;
 
 	hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
 			    shared_sint.as_uint64);
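The new code turns auto-EOI off when the hypervisor advertises the "deprecating auto-EOI" hint, and keeps the old default otherwise. A tiny sketch of gating a default on such a hints bitmask follows; the bit value and names are assumptions for illustration, not the real HV_X64_DEPRECATING_AEOI_RECOMMENDED definition.

/* Sketch: pick a default based on a feature-hints bitmask. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HINT_DEPRECATE_AUTO_EOI (1u << 9)     /* illustrative bit, not the real definition */

int main(void)
{
	uint32_t hints = HINT_DEPRECATE_AUTO_EOI; /* pretend value read from the hypervisor */
	bool auto_eoi;

	if (hints & HINT_DEPRECATE_AUTO_EOI)
		auto_eoi = false;                 /* host asks guests to stop using auto-EOI */
	else
		auto_eoi = true;

	printf("auto_eoi = %s\n", auto_eoi ? "true" : "false");
	return 0;
}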
@@ -722,8 +722,6 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 				5*HZ);
 		post_status(&dm_device);
 	}
-
-	return;
 }
 
 static void hv_online_page(struct page *pg)
@@ -186,8 +186,6 @@ static void fcopy_send_data(struct work_struct *dummy)
 		}
 	}
 	kfree(smsg_out);
-
-	return;
 }
 
 /*
@@ -69,7 +69,7 @@ static const int fw_versions[] = {
  *
  * While the request/response protocol is guaranteed by the host, we further
  * ensure this by serializing packet processing in this driver - we do not
- * read additional packets from the VMBUs until the current packet is fully
+ * read additional packets from the VMBUS until the current packet is fully
  * handled.
  */
 
@@ -397,7 +397,7 @@ kvp_send_key(struct work_struct *dummy)
 	 * the max lengths specified. We will however, reserve room
 	 * for the string terminating character - in the utf16s_utf8s()
 	 * function we limit the size of the buffer where the converted
-	 * string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE -1 to gaurantee
+	 * string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE -1 to guarantee
 	 * that the strings can be properly terminated!
 	 */
 
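The comment above is about leaving one byte of the fixed-size exchange field free for the string terminator when converting with the kernel's utf16s_to_utf8s() helper. A userspace sketch of the same "bounded copy, then terminate" idea follows, with the conversion replaced by a plain truncating copy and a made-up field size.

/* Sketch: convert/copy into a buffer one byte smaller than the field, then NUL-terminate. */
#include <stdio.h>
#include <string.h>

#define EXCHANGE_MAX_VALUE_SIZE 16            /* stand-in for HV_KVP_EXCHANGE_MAX_*_SIZE */

int main(void)
{
	const char *value = "a-longer-value-than-fits";
	char out[EXCHANGE_MAX_VALUE_SIZE];

	/* Limit the copy to size - 1 so there is always room for '\0'. */
	size_t n = strlen(value);
	if (n > sizeof(out) - 1)
		n = sizeof(out) - 1;
	memcpy(out, value, n);
	out[n] = '\0';

	printf("stored \"%s\" (%zu bytes + terminator)\n", out, n);
	return 0;
}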
@@ -483,8 +483,6 @@ kvp_send_key(struct work_struct *dummy)
 	}
 
 	kfree(message);
-
-	return;
 }
 
 /*
@@ -533,7 +531,7 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
 	 */
 	if (error) {
 		/*
-		 * Something failed or we have timedout;
+		 * Something failed or we have timed out;
 		 * terminate the current host-side iteration.
 		 */
 		goto response_done;
@@ -607,8 +605,8 @@ response_done:
  * This callback is invoked when we get a KVP message from the host.
  * The host ensures that only one KVP transaction can be active at a time.
  * KVP implementation in Linux needs to forward the key to a user-mde
- * component to retrive the corresponding value. Consequently, we cannot
- * respond to the host in the conext of this callback. Since the host
+ * component to retrieve the corresponding value. Consequently, we cannot
+ * respond to the host in the context of this callback. Since the host
  * guarantees that at most only one transaction can be active at a time,
  * we stash away the transaction state in a set of global variables.
  */
@@ -212,8 +212,6 @@ static void vss_send_op(void)
 	}
 
 	kfree(vss_msg);
-
-	return;
 }
 
 static void vss_handle_request(struct work_struct *dummy)
@@ -218,8 +218,8 @@ struct hv_per_cpu_context {
 
 struct hv_context {
 	/* We only support running on top of Hyper-V
-	* So at this point this really can only contain the Hyper-V ID
-	*/
+	 * So at this point this really can only contain the Hyper-V ID
+	 */
 	u64 guestid;
 
 	void *tsc_page;
@@ -248,14 +248,6 @@ struct hv_context {
 
 extern struct hv_context hv_context;
 
-struct hv_ring_buffer_debug_info {
-	u32 current_interrupt_mask;
-	u32 current_read_index;
-	u32 current_write_index;
-	u32 bytes_avail_toread;
-	u32 bytes_avail_towrite;
-};
-
 /* Hv Interface */
 
 extern int hv_init(void);
@@ -289,9 +281,6 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
 		       u64 *requestid, bool raw);
 
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-				 struct hv_ring_buffer_debug_info *debug_info);
-
 /*
  * Maximum channels is determined by the size of the interrupt page
  * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
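The comment above implies a simple capacity calculation: half of the PAGE_SIZE interrupt page is the send-side bitmap, the other half the receive side, and each channel needs one bit. A standalone version of that arithmetic, assuming a 4 KiB page; the kernel's own macro may be written differently.

/* Sketch: channel limit derived from half of a 4 KiB interrupt page, one bit per channel. */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;                      /* typical PAGE_SIZE */
	unsigned long recv_bitmap_bytes = page_size / 2;     /* other half is the send side */
	unsigned long max_channels = recv_bitmap_bytes * 8;  /* one bit per channel */

	printf("max channels = %lu\n", max_channels);        /* 16384 for a 4 KiB page */
	return 0;
}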
@@ -376,7 +365,7 @@ struct vmbus_channel_message_table_entry {
 	void (*message_handler)(struct vmbus_channel_message_header *msg);
 };
 
-extern struct vmbus_channel_message_table_entry
+extern const struct vmbus_channel_message_table_entry
 	channel_message_table[CHANNELMSG_COUNT];
 
 
@@ -403,17 +392,17 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);
 void vmbus_on_event(unsigned long data);
 void vmbus_on_msg_dpc(unsigned long data);
 
-int hv_kvp_init(struct hv_util_service *);
+int hv_kvp_init(struct hv_util_service *srv);
 void hv_kvp_deinit(void);
-void hv_kvp_onchannelcallback(void *);
+void hv_kvp_onchannelcallback(void *context);
 
-int hv_vss_init(struct hv_util_service *);
+int hv_vss_init(struct hv_util_service *srv);
 void hv_vss_deinit(void);
-void hv_vss_onchannelcallback(void *);
+void hv_vss_onchannelcallback(void *context);
 
-int hv_fcopy_init(struct hv_util_service *);
+int hv_fcopy_init(struct hv_util_service *srv);
 void hv_fcopy_deinit(void);
-void hv_fcopy_onchannelcallback(void *);
+void hv_fcopy_onchannelcallback(void *context);
 void vmbus_initiate_unload(bool crash);
 
 static inline void hv_poll_channel(struct vmbus_channel *channel,
@@ -75,8 +75,6 @@ static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
 	 */
 	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
 		vmbus_setevent(channel);
-
-	return;
 }
 
 /* Get the next write location for the specified ring buffer. */
@@ -210,6 +208,7 @@ void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
 			ring_info->ring_buffer->interrupt_mask;
 	}
 }
+EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
 
 /* Initialize the ring buffer. */
 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
@@ -269,14 +268,13 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 int hv_ringbuffer_write(struct vmbus_channel *channel,
 			const struct kvec *kv_list, u32 kv_count)
 {
-	int i = 0;
+	int i;
 	u32 bytes_avail_towrite;
-	u32 totalbytes_towrite = 0;
-
+	u32 totalbytes_towrite = sizeof(u64);
 	u32 next_write_location;
 	u32 old_write;
-	u64 prev_indices = 0;
-	unsigned long flags = 0;
+	u64 prev_indices;
+	unsigned long flags;
 	struct hv_ring_buffer_info *outring_info = &channel->outbound;
 
 	if (channel->rescind)
@@ -285,8 +283,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	for (i = 0; i < kv_count; i++)
 		totalbytes_towrite += kv_list[i].iov_len;
 
-	totalbytes_towrite += sizeof(u64);
-
 	spin_lock_irqsave(&outring_info->ring_lock, flags);
 
 	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
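These two hunks change the size accounting so the trailing u64 of write indices is included from the start instead of being added after the loop; the total is still the sum of the scatter-list segments plus sizeof(u64). A small sketch of the resulting computation, using a kvec-like struct and example segment sizes that are purely illustrative:

/* Sketch: write length = sum of segment lengths + trailing u64 of indices. */
#include <stdint.h>
#include <stdio.h>

struct kvec_like { const void *iov_base; size_t iov_len; };

int main(void)
{
	const char hdr[16] = {0}, payload[100] = {0};     /* example segments */
	struct kvec_like kv[2] = {
		{ hdr, sizeof(hdr) }, { payload, sizeof(payload) },
	};
	uint32_t total = sizeof(uint64_t);                /* start with room for the index word */

	for (unsigned int i = 0; i < 2; i++)
		total += kv[i].iov_len;

	printf("bytes to write = %u\n", total);           /* 16 + 100 + 8 = 124 */
	return 0;
}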
@@ -349,18 +345,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       u64 *requestid, bool raw)
 {
 	u32 bytes_avail_toread;
-	u32 next_read_location = 0;
+	u32 next_read_location;
 	u64 prev_indices = 0;
 	struct vmpacket_descriptor desc;
 	u32 offset;
 	u32 packetlen;
-	int ret = 0;
 	struct hv_ring_buffer_info *inring_info = &channel->inbound;
 
 	if (buflen <= 0)
 		return -EINVAL;
 
-
 	*buffer_actual_len = 0;
 	*requestid = 0;
 
@@ -371,7 +365,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		 * No error is set when there is even no header, drivers are
 		 * supposed to analyze buffer_actual_len.
 		 */
-		return ret;
+		return 0;
 	}
 
 	init_cached_read_index(inring_info);
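Per the comment above, an empty ring is reported as success with buffer_actual_len left at zero, and callers are expected to test that length rather than the return value. Here is a hedged userspace sketch of that calling convention, with an invented ring_read() standing in for the driver-side helper.

/* Sketch: zero return + zero actual length means "nothing to read", not an error. */
#include <stdint.h>
#include <stdio.h>

/* Pretend read helper: nothing queued, so it reports success with 0 bytes. */
static int ring_read(void *buf, uint32_t buflen, uint32_t *actual_len)
{
	(void)buf; (void)buflen;
	*actual_len = 0;
	return 0;
}

int main(void)
{
	char buf[64];
	uint32_t actual = 0;

	if (ring_read(buf, sizeof(buf), &actual) < 0) {
		printf("read error\n");
		return 1;
	}
	if (actual == 0) {
		printf("ring empty, nothing to process\n");
		return 0;
	}
	printf("got %u bytes\n", actual);
	return 0;
}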
@@ -417,7 +411,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 
 	hv_signal_on_read(channel);
 
-	return ret;
+	return 0;
 }
 
 /*
@@ -787,8 +787,6 @@ static void vmbus_shutdown(struct device *child_device)
 
 	if (drv->shutdown)
 		drv->shutdown(dev);
-
-	return;
 }
 
 
@@ -855,7 +853,7 @@ void vmbus_on_msg_dpc(unsigned long data)
 	struct hv_message *msg = (struct hv_message *)page_addr +
 				  VMBUS_MESSAGE_SINT;
 	struct vmbus_channel_message_header *hdr;
-	struct vmbus_channel_message_table_entry *entry;
+	const struct vmbus_channel_message_table_entry *entry;
 	struct onmessage_work_context *ctx;
 	u32 message_type = msg->header.message_type;
 