Hi,

> On 01/10/2018 05:17 PM, Andreas Beckmann wrote:
>> while packaging libpfm4 4.9.0 for Debian, Lintian again complained about
>> a few typos, a patch is attached.

Respun the patch against current git HEAD to fix more typos and
normalize some weird spacing in the descriptions.

On 2018-01-11 22:44, William Cohen wrote:
> The patch to correct the typos in the descriptions looks reasonable.  Doing 
> some searching on the web about the "*(0x182)" it appears "(0x1" and "(0x " 
> are extraneous and should just have "DRD (0x182)" at the end:

Fixed these, too, as well as some more duplicate substrings.


Andreas
From 9ccea352d190789d9e823b9e854ab891087bc900 Mon Sep 17 00:00:00 2001
From: Andreas Beckmann <a.beckm...@fz-juelich.de>
Date: Sun, 22 Apr 2018 12:56:02 +0200
Subject: [PATCH] fix typos and normalize spacing

most typos were found by Lintian

Signed-off-by: Andreas Beckmann <a.beckm...@fz-juelich.de>
---
 lib/events/amd64_events_fam16h.h        |  2 +-
 lib/events/intel_bdx_unc_cbo_events.h   |  8 ++++----
 lib/events/intel_bdx_unc_ha_events.h    | 12 ++++++------
 lib/events/intel_bdx_unc_imc_events.h   |  2 +-
 lib/events/intel_bdx_unc_pcu_events.h   |  4 ++--
 lib/events/intel_bdx_unc_qpi_events.h   | 14 +++++++-------
 lib/events/intel_bdx_unc_r3qpi_events.h |  2 +-
 lib/events/intel_skx_unc_cha_events.h   | 10 +++++-----
 lib/events/intel_skx_unc_imc_events.h   |  2 +-
 lib/events/intel_skx_unc_m3upi_events.h | 12 ++++++------
 lib/events/intel_skx_unc_pcu_events.h   |  2 +-
 lib/events/intel_skx_unc_upi_events.h   |  4 ++--
 lib/events/power5+_events.h             |  8 ++++----
 lib/events/power5_events.h              |  8 ++++----
 lib/events/power6_events.h              |  4 ++--
 lib/events/power7_events.h              | 16 ++++++++--------
 16 files changed, 55 insertions(+), 55 deletions(-)

diff --git a/lib/events/amd64_events_fam16h.h b/lib/events/amd64_events_fam16h.h
index 2eab1dc..b92e728 100644
--- a/lib/events/amd64_events_fam16h.h
+++ b/lib/events/amd64_events_fam16h.h
@@ -675,7 +675,7 @@ static const amd64_umask_t amd64_fam16h_cache_cross_invalidates[]={
      .ucode = 0x4,
    },
    { .uname  = "IC_INVALIDATES_DC_DIRTY",
-     .udesc  = "Exection of modified instruction or data too close to code",
+     .udesc  = "Execution of modified instruction or data too close to code",
      .ucode = 0x8,
    },
    { .uname  = "IC_HITS_DC_CLEAN_LINE",
diff --git a/lib/events/intel_bdx_unc_cbo_events.h b/lib/events/intel_bdx_unc_cbo_events.h
index c359821..28e1faf 100644
--- a/lib/events/intel_bdx_unc_cbo_events.h
+++ b/lib/events/intel_bdx_unc_cbo_events.h
@@ -936,7 +936,7 @@ static intel_x86_entry_t intel_bdx_unc_c_pe[]={
   },
   { .name   = "UNC_C_COUNTER0_OCCUPANCY",
     .code   = 0x1f,
-    .desc   = "Since occupancy counts can only be captured in the Cbos 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect.   E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entryy.",
+    .desc   = "Since occupancy counts can only be captured in the Cbos 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect.  E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
     .modmsk = BDX_UNC_CBO_ATTRS,
     .cntmsk = 0xf,
   },
@@ -948,7 +948,7 @@ static intel_x86_entry_t intel_bdx_unc_c_pe[]={
   },
   { .name   = "UNC_C_LLC_LOOKUP",
     .code   = 0x34,
-    .desc   = "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set umask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.   CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+    .desc   = "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set umask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.  CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
     .modmsk = BDX_UNC_CBO_NID_ATTRS,
     .flags  = INTEL_X86_NO_AUTOENCODE,
     .cntmsk = 0xf,
@@ -1127,7 +1127,7 @@ static intel_x86_entry_t intel_bdx_unc_c_pe[]={
   },
   { .name   = "UNC_C_TOR_INSERTS",
     .code   = 0x35,
-    .desc   = "Counts the number of entries successfuly inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent filters but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x1(0x182).",
+    .desc   = "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.  There are a number of subevent filters but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
     .modmsk = BDX_UNC_CBO_NID_ATTRS | _SNBEP_UNC_ATTR_ISOC | _SNBEP_UNC_ATTR_NC,
     .flags  = INTEL_X86_NO_AUTOENCODE,
     .cntmsk = 0xf,
@@ -1137,7 +1137,7 @@ static intel_x86_entry_t intel_bdx_unc_c_pe[]={
   },
   { .name   = "UNC_C_TOR_OCCUPANCY",
     .code   = 0x36,
-    .desc   = "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent filters but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x (0x182)",
+    .desc   = "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.  There are a number of subevent filters but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
     .modmsk = BDX_UNC_CBO_NID_ATTRS | _SNBEP_UNC_ATTR_ISOC | _SNBEP_UNC_ATTR_NC,
     .flags  = INTEL_X86_NO_AUTOENCODE,
     .cntmsk = 0x1,
diff --git a/lib/events/intel_bdx_unc_ha_events.h b/lib/events/intel_bdx_unc_ha_events.h
index a4ab858..764e8f4 100644
--- a/lib/events/intel_bdx_unc_ha_events.h
+++ b/lib/events/intel_bdx_unc_ha_events.h
@@ -1014,7 +1014,7 @@ static intel_x86_entry_t intel_bdx_unc_h_pe[]={
   },
   { .name   = "UNC_H_SNOOP_OCCUPANCY",
     .code   = 0x9,
-    .desc   = "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle.    This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency.  HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have retureturned.",
+    .desc   = "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle.  This can be used in conjunction with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency.  HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.",
     .modmsk = BDX_UNC_HA_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -1023,7 +1023,7 @@ static intel_x86_entry_t intel_bdx_unc_h_pe[]={
   },
   { .name   = "UNC_H_SNOOP_RESP",
     .code   = 0x21,
-    .desc   = "Counts the total number of RspI snoop responses received.  Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system.   In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received.  For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.",
+    .desc   = "Counts the total number of RspI snoop responses received.  Whenever a snoop is issued, one or more snoop responses will be returned depending on the topology of the system.  In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received.  For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.",
     .modmsk = BDX_UNC_HA_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -1032,7 +1032,7 @@ static intel_x86_entry_t intel_bdx_unc_h_pe[]={
   },
   { .name   = "UNC_H_SNP_RESP_RECV_LOCAL",
     .code   = 0x60,
-    .desc   = "Number of snoop responses received for a Local  request",
+    .desc   = "Number of snoop responses received for a Local request",
     .modmsk = BDX_UNC_HA_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -1050,7 +1050,7 @@ static intel_x86_entry_t intel_bdx_unc_h_pe[]={
   },
   { .name   = "UNC_H_TAD_REQUESTS_G0",
     .code   = 0x1b,
-    .desc   = "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save powewer.",
+    .desc   = "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.",
     .modmsk = BDX_UNC_HA_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -1059,7 +1059,7 @@ static intel_x86_entry_t intel_bdx_unc_h_pe[]={
   },
   { .name   = "UNC_H_TAD_REQUESTS_G1",
     .code   = 0x1c,
-    .desc   = "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 8 to 10.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save powewer.",
+    .desc   = "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 8 to 10.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.",
     .modmsk = BDX_UNC_HA_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -1122,7 +1122,7 @@ static intel_x86_entry_t intel_bdx_unc_h_pe[]={
   },
   { .name   = "UNC_H_TXR_BL",
     .code   = 0x10,
-    .desc   = "Counts the number of DRS messages sent out on the BL ring.   This can be filtered by the destination.",
+    .desc   = "Counts the number of DRS messages sent out on the BL ring.  This can be filtered by the destination.",
     .modmsk = BDX_UNC_HA_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
diff --git a/lib/events/intel_bdx_unc_imc_events.h b/lib/events/intel_bdx_unc_imc_events.h
index e6406d9..e3850b6 100644
--- a/lib/events/intel_bdx_unc_imc_events.h
+++ b/lib/events/intel_bdx_unc_imc_events.h
@@ -429,7 +429,7 @@ static intel_x86_entry_t intel_bdx_unc_m_pe[]={
   },
   { .name   = "UNC_M_MAJOR_MODES",
     .code   = 0x7,
-    .desc   = "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.   Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+    .desc   = "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.  Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
     .modmsk = BDX_UNC_IMC_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
diff --git a/lib/events/intel_bdx_unc_pcu_events.h b/lib/events/intel_bdx_unc_pcu_events.h
index 24b0bd5..0d67e09 100644
--- a/lib/events/intel_bdx_unc_pcu_events.h
+++ b/lib/events/intel_bdx_unc_pcu_events.h
@@ -316,7 +316,7 @@ static intel_x86_entry_t intel_bdx_unc_p_pe[]={
   },
   { .name   = "UNC_P_PROCHOT_INTERNAL_CYCLES",
     .code   = 0x9,
-    .desc   = "Counts the number of cycles that we are in Interal PROCHOT mode.  This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+    .desc   = "Counts the number of cycles that we are in internal PROCHOT mode.  This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
     .modmsk = BDX_UNC_PCU_ATTRS,
     .cntmsk = 0xf,
   },
@@ -346,7 +346,7 @@ static intel_x86_entry_t intel_bdx_unc_p_pe[]={
   },
   { .name   = "UNC_P_UFS_TRANSITIONS_NO_CHANGE",
     .code   = 0x79,
-    .desc   = "Ring GV with same final and inital frequency",
+    .desc   = "Ring GV with same final and initial frequency",
     .modmsk = BDX_UNC_PCU_ATTRS,
     .cntmsk = 0xf,
   },
diff --git a/lib/events/intel_bdx_unc_qpi_events.h b/lib/events/intel_bdx_unc_qpi_events.h
index 18c010a..a4d1747 100644
--- a/lib/events/intel_bdx_unc_qpi_events.h
+++ b/lib/events/intel_bdx_unc_qpi_events.h
@@ -304,7 +304,7 @@ static intel_x86_entry_t intel_bdx_unc_q_pe[]={
   },
   { .name   = "UNC_Q_DIRECT2CORE",
     .code   = 0x13,
-    .desc   = "Counts the number of DRS packets that we attempted to do direct2core on.  There are 4 mutually exlusive filters.  Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases.  Note that this does not count packets that are not candidates for Direct2Core.  The only candidates for Direct2Core are DRS packets destined for Cbos.",
+    .desc   = "Counts the number of DRS packets that we attempted to do direct2core on.  There are 4 mutually exclusive filters.  Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases.  Note that this does not count packets that are not candidates for Direct2Core.  The only candidates for Direct2Core are DRS packets destined for Cbos.",
     .modmsk = BDX_UNC_QPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -331,7 +331,7 @@ static intel_x86_entry_t intel_bdx_unc_q_pe[]={
   },
   { .name   = "UNC_Q_RXL_BYPASSED",
     .code   = 0x9,
-    .desc   = "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress.  This is a latency optimization, and should generally be the common case.  If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+    .desc   = "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress.  This is a latency optimization, and should generally be the common case.  If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
     .modmsk = BDX_UNC_QPI_ATTRS,
     .cntmsk = 0xf,
   },
@@ -376,7 +376,7 @@ static intel_x86_entry_t intel_bdx_unc_q_pe[]={
   },
   { .name   = "UNC_Q_RXL_FLITS_G1",
     .code   = 0x2 | (1 << 21), /* extra ev_sel_ext bit set */
-    .desc   = "Counts the number of flits received from the QPI Link.  This is one of three groups that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: datld therefore do: data flits * 8B / time.",
+    .desc   = "Counts the number of flits received from the QPI Link.  This is one of three groups that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).  In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: data flits * 8B / time.",
     .modmsk = BDX_UNC_QPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -385,7 +385,7 @@ static intel_x86_entry_t intel_bdx_unc_q_pe[]={
   },
   { .name   = "UNC_Q_RXL_FLITS_G2",
     .code   = 0x3 | (1 << 21), /* extra ev_sel_ext bit set */
-    .desc   = "Counts the number of flits received from the QPI Link.  This is one of three groups that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: datld therefore do: data flits * 8B / time.",
+    .desc   = "Counts the number of flits received from the QPI Link.  This is one of three groups that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).  In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: data flits * 8B / time.",
     .modmsk = BDX_UNC_QPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -538,7 +538,7 @@ static intel_x86_entry_t intel_bdx_unc_q_pe[]={
   },
   { .name   = "UNC_Q_TXL_FLITS_G0",
     .code   = 0x0,
-    .desc   = "Counts the number of flits transmitted across the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instfor L0) or 4B instead of 8B for L0p.",
+    .desc   = "Counts the number of flits transmitted across the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).  In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
     .modmsk = BDX_UNC_QPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -547,7 +547,7 @@ static intel_x86_entry_t intel_bdx_unc_q_pe[]={
   },
   { .name   = "UNC_Q_TXL_FLITS_G1",
     .code   = 0x0 | (1 << 21), /* extra ev_sel_ext bit set */
-    .desc   = "Counts the number of flits transmitted across the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instfor L0) or 4B instead of 8B for L0p.",
+    .desc   = "Counts the number of flits transmitted across the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).  In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
     .modmsk = BDX_UNC_QPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -556,7 +556,7 @@ static intel_x86_entry_t intel_bdx_unc_q_pe[]={
   },
   { .name   = "UNC_Q_TXL_FLITS_G2",
     .code   = 0x1 | (1 << 21), /* extra ev_sel_ext bit set */
-    .desc   = "Counts the number of flits trasmitted across the QPI Link.  This is one of three groups that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: datld therefore do: data flits * 8B / time.",
+    .desc   = "Counts the number of flits transmitted across the QPI Link.  This is one of three groups that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each flit is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data).  In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits.  Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as data bandwidth.  For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information.  To calculate data bandwidth, one should therefore do: data flits * 8B / time.",
     .modmsk = BDX_UNC_QPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
diff --git a/lib/events/intel_bdx_unc_r3qpi_events.h b/lib/events/intel_bdx_unc_r3qpi_events.h
index 8d7f6a3..cbcc8c4 100644
--- a/lib/events/intel_bdx_unc_r3qpi_events.h
+++ b/lib/events/intel_bdx_unc_r3qpi_events.h
@@ -732,7 +732,7 @@ static intel_x86_entry_t intel_bdx_unc_r3_pe[]={
   },
   { .name   = "UNC_R3_VNA_CREDITS_ACQUIRED",
     .code   = 0x33,
-    .desc   = "Number of QPI VNA Credit acquisitions.  This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder.  VNA credits are used by all message classes in order to communicate across QPI.  If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool.  Note that a single packet may require multiple flit buffers (i.e. when data is being transfered).  Therefore, this event will increment by the number of credits acquired in each cycle.  Filtering based on message class is not provided.  One can count the number of packets transfered in a given message class using an qfclk event.",
+    .desc   = "Number of QPI VNA Credit acquisitions.  This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder.  VNA credits are used by all message classes in order to communicate across QPI.  If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool.  Note that a single packet may require multiple flit buffers (i.e. when data is being transferred).  Therefore, this event will increment by the number of credits acquired in each cycle.  Filtering based on message class is not provided.  One can count the number of packets transferred in a given message class using a qfclk event.",
     .modmsk = BDX_UNC_R3QPI_ATTRS,
     .cntmsk = 0x3,
     .ngrp   = 1,
diff --git a/lib/events/intel_skx_unc_cha_events.h b/lib/events/intel_skx_unc_cha_events.h
index c94caa5..893237b 100644
--- a/lib/events/intel_skx_unc_cha_events.h
+++ b/lib/events/intel_skx_unc_cha_events.h
@@ -3120,7 +3120,7 @@ static intel_x86_entry_t intel_skx_unc_c_pe[]={
   },
   { .name   = "UNC_C_COUNTER0_OCCUPANCY",
     .code   = 0x1f,
-    .desc   = "Since occupancy counts can only be captured in the Cbos 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0.   The filtering available is found in the control register - threshold, invert and edge detect.   E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entryy.",
+    .desc   = "Since occupancy counts can only be captured in the Cbos 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0.  The filtering available is found in the control register - threshold, invert and edge detect.  E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
     .modmsk = SKX_UNC_CHA_ATTRS,
     .cntmsk = 0xf,
   },
@@ -3270,7 +3270,7 @@ static intel_x86_entry_t intel_skx_unc_c_pe[]={
   },
   { .name   = "UNC_C_LLC_LOOKUP",
     .code   = 0x34,
-    .desc   = "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set umask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.   CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
+    .desc   = "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set umask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.  CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
     .modmsk = SKX_UNC_CHA_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 2,
@@ -3606,7 +3606,7 @@ static intel_x86_entry_t intel_skx_unc_c_pe[]={
   },
   { .name   = "UNC_C_SNOOP_RESP",
     .code   = 0x5c,
-    .desc   = "Counts the total number of RspI snoop responses received.  Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system.   In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received.  For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.",
+    .desc   = "Counts the total number of RspI snoop responses received.  Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system.  In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received.  For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.",
     .modmsk = SKX_UNC_CHA_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -3660,7 +3660,7 @@ static intel_x86_entry_t intel_skx_unc_c_pe[]={
   },
   { .name   = "UNC_C_TOR_INSERTS",
     .code   = 0x35,
-    .desc   = "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.",
+    .desc   = "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
     .modmsk = SKX_UNC_CHA_FILT1_ATTRS,
     .cntmsk = 0xf,
     .flags  = INTEL_X86_NO_AUTOENCODE,
@@ -3670,7 +3670,7 @@ static intel_x86_entry_t intel_skx_unc_c_pe[]={
   },
   { .name   = "UNC_C_TOR_OCCUPANCY",
     .code   = 0x36,
-    .desc   = "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   T",
+    .desc   = "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
     .modmsk = SKX_UNC_CHA_FILT1_ATTRS,
     .cntmsk = 0x1,
     .flags  = INTEL_X86_NO_AUTOENCODE,
diff --git a/lib/events/intel_skx_unc_imc_events.h b/lib/events/intel_skx_unc_imc_events.h
index 39b0f27..87f8afb 100644
--- a/lib/events/intel_skx_unc_imc_events.h
+++ b/lib/events/intel_skx_unc_imc_events.h
@@ -386,7 +386,7 @@ static intel_x86_entry_t intel_skx_unc_m_pe[]={
   },
   { .name   = "UNC_M_MAJOR_MODES",
     .code   = 0x7,
-    .desc   = "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.   Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+    .desc   = "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.  Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
     .modmsk = SKX_UNC_IMC_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
diff --git a/lib/events/intel_skx_unc_m3upi_events.h b/lib/events/intel_skx_unc_m3upi_events.h
index 3accb44..7e0273e 100644
--- a/lib/events/intel_skx_unc_m3upi_events.h
+++ b/lib/events/intel_skx_unc_m3upi_events.h
@@ -826,15 +826,15 @@ static intel_x86_umask_t skx_unc_m3_rxc_flits_slot_bl[]={
 	},
 	{ .uname = "P1_NOT_REQ",
 	  .ucode = 0x1000,
-	  .udesc = "Slotting BL Message Into Header Flit -- Dont Need Pump  1",
+	  .udesc = "Slotting BL Message Into Header Flit -- Don't Need Pump 1",
 	},
 	{ .uname = "P1_NOT_REQ_BUT_BUBBLE",
 	  .ucode = 0x2000,
-	  .udesc = "Slotting BL Message Into Header Flit -- Dont Need Pump 1 - Bubblle",
+	  .udesc = "Slotting BL Message Into Header Flit -- Don't Need Pump 1 - Bubble",
 	},
 	{ .uname = "P1_NOT_REQ_NOT_AVAIL",
 	  .ucode = 0x4000,
-	  .udesc = "Slotting BL Message Into Header Flit -- Dont Need Pump 1 - Not Avaiil",
+	  .udesc = "Slotting BL Message Into Header Flit -- Don't Need Pump 1 - Not Avail",
 	},
 	{ .uname = "P1_WAIT",
 	  .ucode = 0x800,
@@ -845,7 +845,7 @@ static intel_x86_umask_t skx_unc_m3_rxc_flits_slot_bl[]={
 static intel_x86_umask_t skx_unc_m3_rxc_flit_gen_hdr1[]={
 	{ .uname = "ACCUM",
 	  .ucode = 0x100,
-	  .udesc = "Flit Gen - Header 1 -- Acumullate",
+	  .udesc = "Flit Gen - Header 1 -- Accumulate",
 	},
 	{ .uname = "ACCUM_READ",
 	  .ucode = 0x200,
@@ -3277,7 +3277,7 @@ static intel_x86_entry_t intel_skx_unc_m3_pe[]={
   },
   { .name   = "UNC_M3_TXC_AD_SPEC_ARB_NO_OTHER_PEND",
     .code   = 0x32,
-    .desc   = "AD speculative arb request asserted due to no other channel being active (have a valid entry but dont have credits to sendd)",
+    .desc   = "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
     .modmsk = SKX_UNC_M3UPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -3343,7 +3343,7 @@ static intel_x86_entry_t intel_skx_unc_m3_pe[]={
   },
   { .name   = "UNC_M3_TXC_BL_SPEC_ARB_NO_OTHER_PEND",
     .code   = 0x37,
-    .desc   = "BL speculative arb request asserted due to no other channel being active (have a valid entry but dont have credits to sendd)",
+    .desc   = "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
     .modmsk = SKX_UNC_M3UPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
diff --git a/lib/events/intel_skx_unc_pcu_events.h b/lib/events/intel_skx_unc_pcu_events.h
index 42b8a58..131a4f7 100644
--- a/lib/events/intel_skx_unc_pcu_events.h
+++ b/lib/events/intel_skx_unc_pcu_events.h
@@ -170,7 +170,7 @@ static intel_x86_entry_t intel_skx_unc_p_pe[]={
   },
   { .name   = "UNC_P_PROCHOT_INTERNAL_CYCLES",
     .code   = 0x9,
-    .desc   = "Counts the number of cycles that we are in Interal PROCHOT mode.  This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+    .desc   = "Counts the number of cycles that we are in internal PROCHOT mode.  This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
     .modmsk = SKX_UNC_PCU_ATTRS,
     .cntmsk = 0xf,
   },
diff --git a/lib/events/intel_skx_unc_upi_events.h b/lib/events/intel_skx_unc_upi_events.h
index ff12e93..2769cdf 100644
--- a/lib/events/intel_skx_unc_upi_events.h
+++ b/lib/events/intel_skx_unc_upi_events.h
@@ -882,7 +882,7 @@ static intel_x86_entry_t intel_skx_unc_upi_pe[]={
   },
   { .name   = "UNC_UPI_DIRECT_ATTEMPTS",
     .code   = 0x12,
-    .desc   = "Counts the number of Data Response(DRS) packets UPI attempted to send directly to the core or to a different UPI link.    Note:  This only counts attempts on valid candidates such as DRS packets destined for CHAs.",
+    .desc   = "Counts the number of Data Response(DRS) packets UPI attempted to send directly to the core or to a different UPI link.  Note:  This only counts attempts on valid candidates such as DRS packets destined for CHAs.",
     .modmsk = SKX_UNC_UPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
@@ -979,7 +979,7 @@ static intel_x86_entry_t intel_skx_unc_upi_pe[]={
   },
   { .name   = "UNC_UPI_RXL_BYPASSED",
     .code   = 0x31,
-    .desc   = "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly and into the Egress.  This is a latency optimization, and should generally be the common case.  If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+    .desc   = "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly into the Egress.  This is a latency optimization, and should generally be the common case.  If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
     .modmsk = SKX_UNC_UPI_ATTRS,
     .cntmsk = 0xf,
     .ngrp   = 1,
diff --git a/lib/events/power5+_events.h b/lib/events/power5+_events.h
index 4d575fd..8050d5e 100644
--- a/lib/events/power5+_events.h
+++ b/lib/events/power5+_events.h
@@ -983,7 +983,7 @@ static const pme_power_entry_t power5p_pe[] = {
 		.pme_name = "PM_FPU_FEST",
 		.pme_code = 0x1010a8,
 		.pme_short_desc = "FPU executed FEST instruction",
-		.pme_long_desc = "The floating point unit has executed an estimate instructions. This could be fres* or frsqrte* where XYZ* means XYZ or  XYZ.   Combined Unit 0 + Unit 1.",
+		.pme_long_desc = "The floating point unit has executed an estimate instruction.  This could be fres* or frsqrte* where XYZ* means XYZ or XYZ.  Combined Unit 0 + Unit 1.",
 	},
 	[ POWER5p_PME_PM_FAB_M1toP1_SIDECAR_EMPTY ] = {
 		.pme_name = "PM_FAB_M1toP1_SIDECAR_EMPTY",
@@ -2357,7 +2357,7 @@ static const pme_power_entry_t power5p_pe[] = {
 		.pme_name = "PM_MEM_PW_CMPL",
 		.pme_code = 0x724e6,
 		.pme_short_desc = "Memory partial-write completed",
-		.pme_long_desc = "Number of Partial Writes completed.   This event is sent from the Memory Controller clock domain and must be scaled accordingly.",
+		.pme_long_desc = "Number of Partial Writes completed.  This event is sent from the Memory Controller clock domain and must be scaled accordingly.",
 	},
 	[ POWER5p_PME_PM_THRD_PRIO_DIFF_minus5or6_CYC ] = {
 		.pme_name = "PM_THRD_PRIO_DIFF_minus5or6_CYC",
@@ -2807,7 +2807,7 @@ static const pme_power_entry_t power5p_pe[] = {
 		.pme_name = "PM_MEM_NONSPEC_RD_CANCEL",
 		.pme_code = 0x711c6,
 		.pme_short_desc = "Non speculative memory read cancelled",
-		.pme_long_desc = "A non-speculative read was cancelled because the combined response indicated it was sourced from aother L2 or L3.   This event is sent from the Memory Controller clock domain and must be scaled accordingly",
+		.pme_long_desc = "A non-speculative read was cancelled because the combined response indicated it was sourced from another L2 or L3.  This event is sent from the Memory Controller clock domain and must be scaled accordingly.",
 	},
 	[ POWER5p_PME_PM_BR_PRED_CR_TA ] = {
 		.pme_name = "PM_BR_PRED_CR_TA",
@@ -2849,7 +2849,7 @@ static const pme_power_entry_t power5p_pe[] = {
 		.pme_name = "PM_LSU0_DERAT_MISS",
 		.pme_code = 0x800c2,
 		.pme_short_desc = "LSU0 DERAT misses",
-		.pme_long_desc = "Total D-ERAT Misses by LSU0.   Requests that miss the Derat are rejected and retried until the request hits in the Erat. This may result in multiple erat misses for the same instruction.",
+		.pme_long_desc = "Total D-ERAT Misses by LSU0.  Requests that miss the Derat are rejected and retried until the request hits in the Erat. This may result in multiple erat misses for the same instruction.",
 	},
 	[ POWER5p_PME_PM_FPU_STALL3 ] = {
 		.pme_name = "PM_FPU_STALL3",
diff --git a/lib/events/power5_events.h b/lib/events/power5_events.h
index 683fe28..4b2e24a 100644
--- a/lib/events/power5_events.h
+++ b/lib/events/power5_events.h
@@ -974,7 +974,7 @@ static const pme_power_entry_t power5_pe[] = {
 		.pme_name = "PM_FPU_FEST",
 		.pme_code = 0x401090,
 		.pme_short_desc = "FPU executed FEST instruction",
-		.pme_long_desc = "The floating point unit has executed an estimate instructions. This could be fres* or frsqrte* where XYZ* means XYZ or  XYZ.   Combined Unit 0 + Unit 1.",
+		.pme_long_desc = "The floating point unit has executed an estimate instruction.  This could be fres* or frsqrte* where XYZ* means XYZ or XYZ.  Combined Unit 0 + Unit 1.",
 	},
 	[ POWER5_PME_PM_FAB_M1toP1_SIDECAR_EMPTY ] = {
 		.pme_name = "PM_FAB_M1toP1_SIDECAR_EMPTY",
@@ -2294,7 +2294,7 @@ static const pme_power_entry_t power5_pe[] = {
 		.pme_name = "PM_MEM_PW_CMPL",
 		.pme_code = 0x724e6,
 		.pme_short_desc = "Memory partial-write completed",
-		.pme_long_desc = "Number of Partial Writes completed.   This event is sent from the Memory Controller clock domain and must be scaled accordingly.",
+		.pme_long_desc = "Number of Partial Writes completed.  This event is sent from the Memory Controller clock domain and must be scaled accordingly.",
 	},
 	[ POWER5_PME_PM_THRD_PRIO_DIFF_minus5or6_CYC ] = {
 		.pme_name = "PM_THRD_PRIO_DIFF_minus5or6_CYC",
@@ -2738,7 +2738,7 @@ static const pme_power_entry_t power5_pe[] = {
 		.pme_name = "PM_MEM_NONSPEC_RD_CANCEL",
 		.pme_code = 0x711c6,
 		.pme_short_desc = "Non speculative memory read cancelled",
-		.pme_long_desc = "A non-speculative read was cancelled because the combined response indicated it was sourced from aother L2 or L3.   This event is sent from the Memory Controller clock domain and must be scaled accordingly",
+		.pme_long_desc = "A non-speculative read was cancelled because the combined response indicated it was sourced from another L2 or L3.  This event is sent from the Memory Controller clock domain and must be scaled accordingly.",
 	},
 	[ POWER5_PME_PM_BR_PRED_CR_TA ] = {
 		.pme_name = "PM_BR_PRED_CR_TA",
@@ -2780,7 +2780,7 @@ static const pme_power_entry_t power5_pe[] = {
 		.pme_name = "PM_LSU0_DERAT_MISS",
 		.pme_code = 0x800c2,
 		.pme_short_desc = "LSU0 DERAT misses",
-		.pme_long_desc = "Total D-ERAT Misses by LSU0.   Requests that miss the Derat are rejected and retried until the request hits in the Erat. This may result in multiple erat misses for the same instruction.",
+		.pme_long_desc = "Total D-ERAT Misses by LSU0.  Requests that miss the Derat are rejected and retried until the request hits in the Erat. This may result in multiple erat misses for the same instruction.",
 	},
 	[ POWER5_PME_PM_L2SB_RCLD_DISP ] = {
 		.pme_name = "PM_L2SB_RCLD_DISP",
diff --git a/lib/events/power6_events.h b/lib/events/power6_events.h
index 90bd26a..23ce64d 100644
--- a/lib/events/power6_events.h
+++ b/lib/events/power6_events.h
@@ -3494,8 +3494,8 @@ static const pme_power_entry_t power6_pe[] = {
 	[ POWER6_PME_PM_FAB_ADDR_COLLISION ] = {
 		.pme_name = "PM_FAB_ADDR_COLLISION",
 		.pme_code = 0x5018e,
-		.pme_short_desc = "local node launch collision with off-node address   ",
-		.pme_long_desc = "local node launch collision with off-node address   ",
+		.pme_short_desc = "local node launch collision with off-node address",
+		.pme_long_desc = "local node launch collision with off-node address",
 	},
 	[ POWER6_PME_PM_MRK_FXU_FIN ] = {
 		.pme_name = "PM_MRK_FXU_FIN",
diff --git a/lib/events/power7_events.h b/lib/events/power7_events.h
index 7bfdf15..43e9104 100644
--- a/lib/events/power7_events.h
+++ b/lib/events/power7_events.h
@@ -598,8 +598,8 @@ static const pme_power_entry_t power7_pe[] = {
 	[ POWER7_PME_PM_VSU0_16FLOP ] = {
 		.pme_name = "PM_VSU0_16FLOP",
 		.pme_code = 0xa0a4,
-		.pme_short_desc = "Sixteen flops operation (SP vector versions of fdiv,fsqrt)  ",
-		.pme_long_desc = "Sixteen flops operation (SP vector versions of fdiv,fsqrt)  ",
+		.pme_short_desc = "Sixteen flops operation (SP vector versions of fdiv,fsqrt)",
+		.pme_long_desc = "Sixteen flops operation (SP vector versions of fdiv,fsqrt)",
 	},
 	[ POWER7_PME_PM_MRK_LSU_DERAT_MISS ] = {
 		.pme_name = "PM_MRK_LSU_DERAT_MISS",
@@ -1792,8 +1792,8 @@ static const pme_power_entry_t power7_pe[] = {
 	[ POWER7_PME_PM_IC_BANK_CONFLICT ] = {
 		.pme_name = "PM_IC_BANK_CONFLICT",
 		.pme_code = 0x4082,
-		.pme_short_desc = "Read blocked due to interleave conflict.  ",
-		.pme_long_desc = "Read blocked due to interleave conflict.  ",
+		.pme_short_desc = "Read blocked due to interleave conflict.",
+		.pme_long_desc = "Read blocked due to interleave conflict.",
 	},
 	[ POWER7_PME_PM_BR_MPRED_CR_TA ] = {
 		.pme_name = "PM_BR_MPRED_CR_TA",
@@ -1984,8 +1984,8 @@ static const pme_power_entry_t power7_pe[] = {
 	[ POWER7_PME_PM_VSU1_2FLOP_DOUBLE ] = {
 		.pme_name = "PM_VSU1_2FLOP_DOUBLE",
 		.pme_code = 0xa08e,
-		.pme_short_desc = "two flop DP vector operation (xvadddp, xvmuldp, xvsubdp, xvcmpdp, xvseldp, xvabsdp, xvnabsdp, xvredp ,xvsqrtedp, vxnegdp)  ",
-		.pme_long_desc = "two flop DP vector operation (xvadddp, xvmuldp, xvsubdp, xvcmpdp, xvseldp, xvabsdp, xvnabsdp, xvredp ,xvsqrtedp, vxnegdp)  ",
+		.pme_short_desc = "two flop DP vector operation (xvadddp, xvmuldp, xvsubdp, xvcmpdp, xvseldp, xvabsdp, xvnabsdp, xvredp ,xvsqrtedp, vxnegdp)",
+		.pme_long_desc = "two flop DP vector operation (xvadddp, xvmuldp, xvsubdp, xvcmpdp, xvseldp, xvabsdp, xvnabsdp, xvredp ,xvsqrtedp, vxnegdp)",
 	},
 	[ POWER7_PME_PM_THRD_PRIO_6_7_CYC ] = {
 		.pme_name = "PM_THRD_PRIO_6_7_CYC",
@@ -3670,8 +3670,8 @@ static const pme_power_entry_t power7_pe[] = {
 	[ POWER7_PME_PM_VSU0_2FLOP_DOUBLE ] = {
 		.pme_name = "PM_VSU0_2FLOP_DOUBLE",
 		.pme_code = 0xa08c,
-		.pme_short_desc = "two flop DP vector operation (xvadddp, xvmuldp, xvsubdp, xvcmpdp, xvseldp, xvabsdp, xvnabsdp, xvredp ,xvsqrtedp, vxnegdp)  ",
-		.pme_long_desc = "two flop DP vector operation (xvadddp, xvmuldp, xvsubdp, xvcmpdp, xvseldp, xvabsdp, xvnabsdp, xvredp ,xvsqrtedp, vxnegdp)  ",
+		.pme_short_desc = "two flop DP vector operation (xvadddp, xvmuldp, xvsubdp, xvcmpdp, xvseldp, xvabsdp, xvnabsdp, xvredp ,xvsqrtedp, vxnegdp)",
+		.pme_long_desc = "two flop DP vector operation (xvadddp, xvmuldp, xvsubdp, xvcmpdp, xvseldp, xvabsdp, xvnabsdp, xvredp ,xvsqrtedp, vxnegdp)",
 	},
 	[ POWER7_PME_PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM ] = {
 		.pme_name = "PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM",
-- 
2.11.0

------------------------------------------------------------------------------
Check out the vibrant tech community on one of the world's most
engaging tech sites, Slashdot.org! http://sdm.link/slashdot
_______________________________________________
perfmon2-devel mailing list
perfmon2-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/perfmon2-devel

Reply via email to