Author: mjg
Date: Tue Sep  1 21:30:01 2020
New Revision: 365089
URL: https://svnweb.freebsd.org/changeset/base/365089

Log:
  vmware: clean up empty lines in .c and .h files

Modified:
  head/sys/dev/vmware/pvscsi/pvscsi.c
  head/sys/dev/vmware/vmci/vmci_defs.h
  head/sys/dev/vmware/vmci/vmci_doorbell.c
  head/sys/dev/vmware/vmci/vmci_driver.c
  head/sys/dev/vmware/vmci/vmci_event.c
  head/sys/dev/vmware/vmci/vmci_hashtable.c
  head/sys/dev/vmware/vmci/vmci_kernel_if.c
  head/sys/dev/vmware/vmci/vmci_queue_pair.c
  head/sys/dev/vmware/vmxnet3/if_vmx.c

Modified: head/sys/dev/vmware/pvscsi/pvscsi.c
==============================================================================
--- head/sys/dev/vmware/pvscsi/pvscsi.c Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/pvscsi/pvscsi.c Tue Sep  1 21:30:01 2020        (r365089)
@@ -127,7 +127,6 @@ static int pvscsi_probe(device_t dev);
 static int pvscsi_shutdown(device_t dev);
 static int pvscsi_get_tunable(struct pvscsi_softc *sc, char *name, int value);
 
-
 #ifdef PVSCSI_DEBUG_LOGGING
 static int pvscsi_log_level = 0;
 static SYSCTL_NODE(_hw, OID_AUTO, pvscsi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
@@ -154,11 +153,9 @@ TUNABLE_INT("hw.pvscsi.use_req_call_threshold", &pvscs
 static int pvscsi_max_queue_depth = 0;
 TUNABLE_INT("hw.pvscsi.max_queue_depth", &pvscsi_max_queue_depth);
 
-
 struct pvscsi_sg_list {
        struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT];
 };
-
 
 #define        PVSCSI_ABORT_TIMEOUT    2
 #define        PVSCSI_RESET_TIMEOUT    10

Modified: head/sys/dev/vmware/vmci/vmci_defs.h
==============================================================================
--- head/sys/dev/vmware/vmci/vmci_defs.h        Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/vmci/vmci_defs.h        Tue Sep  1 21:30:01 2020        (r365089)
@@ -386,7 +386,6 @@ struct vmci_queue_header {
        volatile uint64_t       consumer_head;  /* Offset in peer queue. */
 };
 
-
 /*
  * If one client of a QueuePair is a 32bit entity, we restrict the QueuePair
  * size to be less than 4GB, and use 32bit atomic operations on the head and

Modified: head/sys/dev/vmware/vmci/vmci_doorbell.c
==============================================================================
--- head/sys/dev/vmware/vmci/vmci_doorbell.c    Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/vmci/vmci_doorbell.c    Tue Sep  1 21:30:01 2020        (r365089)
@@ -621,7 +621,6 @@ vmci_doorbell_destroy(struct vmci_handle handle)
 
        result = vmci_doorbell_unlink(handle, entry->is_doorbell);
        if (VMCI_SUCCESS != result) {
-
                /*
                 * The only reason this should fail would be an inconsistency
                 * between guest and hypervisor state, where the guest believes

Modified: head/sys/dev/vmware/vmci/vmci_driver.c
==============================================================================
--- head/sys/dev/vmware/vmci/vmci_driver.c      Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/vmci/vmci_driver.c      Tue Sep  1 21:30:01 2020        (r365089)
@@ -281,10 +281,8 @@ vmci_read_datagrams_from_port(vmci_io_handle io_handle
                         */
 
                        if (dg_in_size > remaining_bytes) {
-
                                if (remaining_bytes !=
                                    current_dg_in_buffer_size) {
-
                                        /*
                                         * We move the partial datagram to the
                                         * front and read the reminder of the

Modified: head/sys/dev/vmware/vmci/vmci_event.c
==============================================================================
--- head/sys/dev/vmware/vmci/vmci_event.c       Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/vmci/vmci_event.c       Tue Sep  1 21:30:01 2020        (r365089)
@@ -104,7 +104,6 @@ vmci_event_exit(void)
        for (e = 0; e < VMCI_EVENT_MAX; e++) {
                vmci_list_scan_safe(iter, &subscriber_array[e],
                    subscriber_list_item, iter_2) {
-
                        /*
                         * We should never get here because all events should
                         * have been unregistered before we try to unload the

Modified: head/sys/dev/vmware/vmci/vmci_hashtable.c
==============================================================================
--- head/sys/dev/vmware/vmci/vmci_hashtable.c   Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/vmci/vmci_hashtable.c   Tue Sep  1 21:30:01 2020        (r365089)
@@ -368,7 +368,6 @@ vmci_hashtable_release_entry_locked(struct vmci_hashta
        entry->ref_count--;
        /* Check if this is last reference and report if so. */
        if (entry->ref_count == 0) {
-
                /*
                 * Remove entry from hash table if not already removed. This
                 * could have happened already because VMCIHashTable_RemoveEntry

Modified: head/sys/dev/vmware/vmci/vmci_kernel_if.c
==============================================================================
--- head/sys/dev/vmware/vmci/vmci_kernel_if.c   Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/vmci/vmci_kernel_if.c   Tue Sep  1 21:30:01 2020        (r365089)
@@ -615,7 +615,6 @@ vmci_alloc_ppn_set(void *prod_q, uint64_t num_produce_
                if (sizeof(pfn) >
                    sizeof(*consume_ppns) && pfn != consume_ppns[i])
                        goto ppn_error;
-
        }
 
        ppn_set->num_produce_pages = num_produce_pages;

Modified: head/sys/dev/vmware/vmci/vmci_queue_pair.c
==============================================================================
--- head/sys/dev/vmware/vmci/vmci_queue_pair.c  Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/vmci/vmci_queue_pair.c  Tue Sep  1 21:30:01 2020        (r365089)
@@ -838,7 +838,6 @@ vmci_queue_pair_detach_guest_work(struct vmci_handle h
                result = vmci_queue_pair_detach_hypercall(handle);
                if (entry->hibernate_failure) {
                        if (result == VMCI_ERROR_NOT_FOUND) {
-
                                /*
                                 * If a queue pair detach failed when entering
                                 * hibernation, the guest driver and the device
@@ -856,7 +855,6 @@ vmci_queue_pair_detach_guest_work(struct vmci_handle h
                        }
                }
                if (result < VMCI_SUCCESS) {
-
                        /*
                         * We failed to notify a non-local queuepair. That other
                         * queuepair might still be accessing the shared

Modified: head/sys/dev/vmware/vmxnet3/if_vmx.c
==============================================================================
--- head/sys/dev/vmware/vmxnet3/if_vmx.c        Tue Sep  1 21:29:44 2020        (r365088)
+++ head/sys/dev/vmware/vmxnet3/if_vmx.c        Tue Sep  1 21:30:01 2020        (r365089)
@@ -76,7 +76,6 @@ __FBSDID("$FreeBSD$");
 #include "opt_inet.h"
 #include "opt_inet6.h"
 
-
 #define VMXNET3_VMWARE_VENDOR_ID       0x15AD
 #define VMXNET3_VMWARE_DEVICE_ID       0x07B0
 
@@ -192,7 +191,6 @@ typedef enum {
 
 static void    vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
 
-
 static device_method_t vmxnet3_methods[] = {
        /* Device interface */
        DEVMETHOD(device_register, vmxnet3_register),
@@ -476,7 +474,7 @@ vmxnet3_msix_intr_assign(if_ctx_t ctx, int msix)
 
        sc = iflib_get_softc(ctx);
        scctx = sc->vmx_scctx;
-       
+
        for (i = 0; i < scctx->isc_nrxqsets; i++) {
                snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
 
@@ -737,7 +735,7 @@ vmxnet3_queues_shared_alloc(struct vmxnet3_softc *sc)
        if_softc_ctx_t scctx;
        int size;
        int error;
-       
+
        scctx = sc->vmx_scctx;
 
        /*
@@ -763,7 +761,7 @@ vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
        struct vmxnet3_comp_ring *txc;
        struct vmxnet3_txring *txr;
        if_softc_ctx_t scctx;
-       
+
        txq = &sc->vmx_txq[q];
        txc = &txq->vxtxq_comp_ring;
        txr = &txq->vxtxq_cmd_ring;
@@ -786,7 +784,7 @@ vmxnet3_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
        int q;
        int error;
        caddr_t kva;
-       
+
        sc = iflib_get_softc(ctx);
 
        /* Allocate the array of transmit queues */
@@ -881,7 +879,7 @@ vmxnet3_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
        int i;
        int error;
        caddr_t kva;
-       
+
        sc = iflib_get_softc(ctx);
        scctx = sc->vmx_scctx;
 
@@ -1208,11 +1206,11 @@ vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
        struct ifnet *ifp;
        struct vmxnet3_driver_shared *ds;
        if_softc_ctx_t scctx;
-       
+
        ifp = sc->vmx_ifp;
        ds = sc->vmx_ds;
        scctx = sc->vmx_scctx;
-       
+
        ds->mtu = ifp->if_mtu;
        ds->ntxqueue = scctx->isc_ntxqsets;
        ds->nrxqueue = scctx->isc_nrxqsets;
@@ -1416,7 +1414,7 @@ vmxnet3_isc_txd_credits_update(void *vsc, uint16_t txq
        struct vmxnet3_txcompdesc *txcd;
        struct vmxnet3_txring *txr;
        int processed;
-       
+
        sc = vsc;
        txq = &sc->vmx_txq[txqid];
        txc = &txq->vxtxq_comp_ring;
@@ -1493,7 +1491,7 @@ vmxnet3_isc_rxd_available(void *vsc, uint16_t rxqid, q
                        completed_gen ^= 1;
                }
        }
-       
+
        return (avail);
 }
 
@@ -1734,7 +1732,7 @@ vmxnet3_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8
        struct vmxnet3_rxqueue *rxq;
        struct vmxnet3_rxring *rxr;
        bus_size_t r;
-       
+
        sc = vsc;
        rxq = &sc->vmx_rxq[rxqid];
        rxr = &rxq->vxrxq_cmd_ring[flid];
@@ -1753,7 +1751,7 @@ vmxnet3_legacy_intr(void *xsc)
        struct vmxnet3_softc *sc;
        if_softc_ctx_t scctx;
        if_ctx_t ctx;
-       
+
        sc = xsc;
        scctx = sc->vmx_scctx;
        ctx = sc->vmx_ctx;
@@ -1834,7 +1832,7 @@ vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet
        struct vmxnet3_comp_ring *txc;
 
        txq->vxtxq_last_flush = -1;
-       
+
        txr = &txq->vxtxq_cmd_ring;
        txr->vxtxr_next = 0;
        txr->vxtxr_gen = VMXNET3_INIT_GEN;
@@ -1953,7 +1951,7 @@ static void
 vmxnet3_init(if_ctx_t ctx)
 {
        struct vmxnet3_softc *sc;
-       
+
        sc = iflib_get_softc(ctx);
 
        /* Use the current MAC address. */
@@ -2182,7 +2180,7 @@ vmxnet3_link_status(struct vmxnet3_softc *sc)
        ctx = sc->vmx_ctx;
        link = vmxnet3_link_is_up(sc);
        speed = IF_Gbps(10);
-       
+
        if (link != 0 && sc->vmx_link_active == 0) {
                sc->vmx_link_active = 1;
                iflib_link_state_change(ctx, LINK_STATE_UP, speed);
@@ -2320,7 +2318,7 @@ vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
        int i;
 
        scctx = sc->vmx_scctx;
-       
+
        for (i = 0; i < scctx->isc_ntxqsets; i++) {
                struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
 
@@ -2380,7 +2378,7 @@ vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
        int i;
 
        scctx = sc->vmx_scctx;
-       
+
        for (i = 0; i < scctx->isc_ntxqsets; i++)
                vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
        for (i = 0; i < scctx->isc_nrxqsets; i++)