http://git-wip-us.apache.org/repos/asf/ignite/blob/b21f750f/modules/platforms/dotnet/Apache.Ignite.ndproj ---------------------------------------------------------------------- diff --git a/modules/platforms/dotnet/Apache.Ignite.ndproj b/modules/platforms/dotnet/Apache.Ignite.ndproj new file mode 100644 index 0000000..9f5287b --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.ndproj @@ -0,0 +1,11139 @@ +<?xml version="1.0" encoding="utf-8" standalone="yes"?> +<NDepend AppName="Apache.Ignite" Platform="DotNet" FileWrittenByProductVersion="2017.2.2.8962"> + <OutputDir KeepXmlFiles="False">c:\w\incubator-ignite\modules\platforms\dotnet\NDependOut</OutputDir> + <Assemblies> + <Name>Apache.Ignite.Core</Name> + </Assemblies> + <FrameworkAssemblies> + <Name>mscorlib</Name> + <Name>System.Core</Name> + <Name>System.Xml</Name> + <Name>System</Name> + <Name>System.Configuration</Name> + <Name>System.Transactions</Name> + </FrameworkAssemblies> + <Dirs> + <Dir>C:\WINDOWS\Microsoft.NET\Framework\v4.0.30319</Dir> + <Dir>C:\WINDOWS\Microsoft.NET\Framework\v4.0.30319\WPF</Dir> + <Dir>$(NdProjectDir)\Apache.Ignite.Core\bin\Debug</Dir> + </Dirs> + <MergeCodeGeneratedByCompiler>True</MergeCodeGeneratedByCompiler> + <Report Kind="0" SectionsEnabled="110591" XslPath="" Flags="261120" /> + <BuildComparisonSetting ProjectMode="CurrentProject" BuildMode="NDaysAgoAnalysisResult" ProjectFileToCompareWith="" BuildFileToCompareWith="" NDaysAgo="30" /> + <BaselineInUISetting ProjectMode="CurrentProject" BuildMode="NDaysAgoAnalysisResult" ProjectFileToCompareWith="" BuildFileToCompareWith="" NDaysAgo="30" /> + <CoverageFiles CoverageDir="" UncoverableAttribute="" /> + <TrendMetrics UseCustomLog="False" LogRecurrence="3" LogLabel="2" UseCustomDir="False" CustomDir=""> + <Chart Name="Size" ShowInReport="True"> + <Serie MetricName="# Lines of Code" MetricUnit="Loc" Color="#FF00BFFF" ChartType="Line" ScaleExp="0" /> + <Serie MetricName="# Lines of Code Covered" MetricUnit="Loc" 
Color="#FF32CD32" ChartType="Area" ScaleExp="0" /> + <Serie MetricName="# Lines of Code (NotMyCode)" MetricUnit="Loc" Color="#FFA9A9A9" ChartType="Area" ScaleExp="0" /> + <Serie MetricName="# Lines of Comments" MetricUnit="Lines" Color="#FF008000" ChartType="Line" ScaleExp="0" /> + </Chart> + <Chart Name="% Coverage and % Debt" ShowInReport="True"> + <Serie MetricName="Percentage Code Coverage" MetricUnit="%" Color="#FF32CD32" ChartType="Area" ScaleExp="0" /> + <Serie MetricName="Percentage Debt (Metric)" MetricUnit="%" Color="#FFFF0000" ChartType="Line" ScaleExp="0" /> + </Chart> + <Chart Name="Issues" ShowInReport="True"> + <Serie MetricName="# New Issues since Baseline" MetricUnit="issues" Color="#FFFF0000" ChartType="Line" ScaleExp="0" /> + <Serie MetricName="# Issues Fixed since Baseline" MetricUnit="issues" Color="#FF32CD32" ChartType="Line" ScaleExp="0" /> + <Serie MetricName="# Blocker/Critical/Major Issues" MetricUnit="issues" Color="#FFFF8C00" ChartType="Line" ScaleExp="0" /> + <Serie MetricName="# Issues" MetricUnit="issues" Color="#FFFFD700" ChartType="Line" ScaleExp="-2" /> + </Chart> + <Chart Name="Rules" ShowInReport="True"> + <Serie MetricName="# Rules" MetricUnit="Rules" Color="#FF66CDAA" ChartType="Line" ScaleExp="0" /> + <Serie MetricName="# Rules Violated" MetricUnit="Rules" Color="#FFFF8C00" ChartType="Area" ScaleExp="0" /> + <Serie MetricName="# Critical Rules Violated" MetricUnit="Rules" Color="#FFFF0000" ChartType="Area" ScaleExp="0" /> + </Chart> + <Chart Name="Quality Gates" ShowInReport="True"> + <Serie MetricName="# Quality Gates Fail" MetricUnit="quality gates" Color="#FFFF0000" ChartType="Line" ScaleExp="0" /> + <Serie MetricName="# Quality Gates Warn" MetricUnit="quality gates" Color="#FFFF8C00" ChartType="Line" ScaleExp="0" /> + <Serie MetricName="# Quality Gates" MetricUnit="quality gates" Color="#FF32CD32" ChartType="Line" ScaleExp="0" /> + </Chart> + <Chart Name="Debt" ShowInReport="True"> + <Serie MetricName="Debt (Metric)" 
MetricUnit="man-days" Color="#FFFF0000" ChartType="Line" ScaleExp="0" /> + <Serie MetricName="Annual Interest (Metric)" MetricUnit="man-days" Color="#FFFF8C00" ChartType="Line" ScaleExp="0" /> + </Chart> + </TrendMetrics> + <HistoricAnalysisResult PersistRecurrence="3" UseCustomDir="False" CustomDir="" /> + <SourceFileRebasing FromPath="" ToPath="" /> + <PathVariables /> + <RuleFiles /> + <ProjectRules AreActive="True" /> + <ProjectDebtSettings DebtSettingsStorage="0" SettingsFilePath=""> + <DebtSettings> + <DebtFactor>1</DebtFactor> + <AnnualInterestFactor>1</AnnualInterestFactor> + <DebtDefault>0</DebtDefault> + <AnnualInterestDefault>0</AnnualInterestDefault> + <DebtStringFormat>$ManDay$</DebtStringFormat> + <MoneyPerManHour>50</MoneyPerManHour> + <Currency>USD</Currency> + <CurrencyLocation>After</CurrencyLocation> + <EstimatedNumberOfManDayToDevelop1000LogicalLinesOfCode>18</EstimatedNumberOfManDayToDevelop1000LogicalLinesOfCode> + <NumberOfWorkDayPerYear>240</NumberOfWorkDayPerYear> + <NumberOfWorkHourPerDay>8</NumberOfWorkHourPerDay> + <A2B_RatingThreshold>5</A2B_RatingThreshold> + <B2C_RatingThreshold>10</B2C_RatingThreshold> + <C2D_RatingThreshold>20</C2D_RatingThreshold> + <D2E_RatingThreshold>50</D2E_RatingThreshold> + <Low2Medium_SeverityThreshold>1200000000</Low2Medium_SeverityThreshold> + <Medium2High_SeverityThreshold>12000000000</Medium2High_SeverityThreshold> + <High2Critical_SeverityThreshold>72000000000</High2Critical_SeverityThreshold> + <Critical2Blocker_SeverityThreshold>360000000000</Critical2Blocker_SeverityThreshold> + </DebtSettings> + </ProjectDebtSettings> + <Queries> + <Group Name="Quality Gates" Active="True" ShownInReport="True"> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Quality Gates Evolution</Name> +from qg in QualityGates +let qgBaseline = qg.OlderVersion() +let relyOnDiff = qgBaseline == null +let evolution = relyOnDiff ? 
(TrendIcon?)null : + // When a quality gate relies on diff between now and baseline + // it is not executed against the baseline + qg.ValueDiff() == 0d ? + TrendIcon.Constant : + (qg.ValueDiff() > 0 ? + ( qg.MoreIsBad ? TrendIcon.RedUp: TrendIcon.GreenUp) : + (!qg.MoreIsBad ? TrendIcon.RedDown: TrendIcon.GreenDown)) +select new { qg, + Evolution = evolution, + + BaselineStatus = relyOnDiff? (QualityGateStatus?) null : qgBaseline.Status, + Status = qg.Status, + + BaselineValue = relyOnDiff? (null) : qgBaseline.ValueString, + Value = qg.ValueString, +} + +// <Description> +// Show quality gates evolution between baseline and now. +// +// When a quality gate relies on diff between now and baseline (like *New Debt since Baseline*) +// it is not executed against the baseline and as a consequence its evolution is not available. +// +// Double-click a quality gate for editing. +// </Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Percentage Coverage" Unit="%" /> +failif value < 70% +warnif value < 80% +codeBase.PercentageCoverage + +//<Description> +// Code coverage is a measure used to describe the degree to which the source code of a program +// is tested by a particular test suite. A program with high code coverage, measured as a percentage, +// has had more of its source code executed during testing which suggests it has a lower chance of +// containing undetected software bugs compared to a program with low code coverage. +// +// Code coverage is certainly the most important quality code metric. But coverage is not enough; +// the team needs to ensure that results are checked at test-time. These checks can be done both +// in test code, and in application code through assertions. The important part is that a test +// must fail explicitly when a check gets invalidated during the test execution. 
+// +// This quality gate defines a fail threshold (70%) and a warn threshold (80%). These are +// indicative thresholds and in practice the more the better. To achieve high coverage and +// low risk, make sure that new and refactored classes get 100% covered by tests and that +// the application and test code contains as many checks/assertions as possible. +//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Percentage Coverage on New Code" Unit="%" /> +failif value < 70% +warnif value < 80% +let newMethods = Application.Methods.Where(m => m.WasAdded() && m.NbLinesOfCode > 0) +let locCovered = newMethods.Sum(m => m.NbLinesOfCodeCovered) +let loc = newMethods.Sum(m => m.NbLinesOfCode) +select 100d * locCovered / loc + +//<Description> +// *New Code* is defined as methods added since the baseline. +// +// To achieve high code coverage it is essential that new code gets properly +// tested and covered by tests. It is advised that all non-UI new classes get +// 100% covered. +// +// Typically 90% of a class is easy to cover by tests and 10% is hard to reach +// through tests. It means that this 10% remaining is not easily testable, which +// means it is not well designed, which often means that this code is especially +// **error-prone**. This is the reason why it is important to reach 100% coverage +// for a class, to make sure that potentially *error-prone* code gets tested. 
+//</Description> +]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Percentage Coverage on Refactored Code" Unit="%" /> +failif value < 70% +warnif value < 80% +let newMethods = Application.Methods.Where(m => m.CodeWasChanged() && m.NbLinesOfCode > 0) +let locCovered = newMethods.Sum(m => m.NbLinesOfCodeCovered) +let loc = newMethods.Sum(m => m.NbLinesOfCode) +select 100d * locCovered / loc + +//<Description> +// *Refactored Code* is defined as methods where *code was changed* since the baseline. +// +// Comment changes and formatting changes are not considerd as refactoring. +// +// To achieve high code coverage it is essential that refactored code gets properly +// tested and covered by tests. It is advised that when refactoring a class +// or a method, it is important to also write tests to make sure it gets 100% covered. +// +// Typically 90% of a class is easy to cover by tests and 10% is hard to reach +// through tests. It means that this 10% remaining is not easily testable, which +// means it is not well designed, which often means that this code is especially +// **error-prone**. This is the reason why it is important to reach 100% coverage +// for a class, to make sure that potentially *error-prone* code gets tested. +//</Description> +]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Blocker Issues" Unit="issues" /> +failif count > 0 issues +from i in Issues +where i.Severity == Severity.Blocker +select new { i, i.Severity, i.Debt, i.AnnualInterest } + +//<Description> +// An issue with the severity **Blocker** cannot move to production, it must be fixed. 
+// +// The severity of an issue is either defined explicitly in the rule source code, +// or inferred from the issue *annual interest* and thresholds defined in the +// NDepend Project Properties > Issue and Debt. +//</Description> + +]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Critical Issues" Unit="issues" /> +failif count > 10 issues +warnif count > 0 issues + +from i in Issues +where i.Severity == Severity.Critical +select new { i, i.Severity, i.Debt, i.AnnualInterest } + +//<Description> +// An issue with a severity level **Critical** shouldn't move to production. +// It still can for business imperative needs, but at worst it must +// be fixed during the next iterations. +// +// The severity of an issue is either defined explicitly in the rule source code, +// or inferred from the issue *annual interest* and thresholds defined in the +// NDepend Project Properties > Issue and Debt. +//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="New Blocker / Critical / High Issues" Unit="issues" /> +failif count > 0 issues +from i in Issues +where i.Severity.EqualsAny(Severity.Blocker, Severity.Critical, Severity.High) && + // Count both the new issues and the issues that became at least Critical + (i.WasAdded() || i.OlderVersion().Severity < Severity.High) +select new { i, i.Severity, i.Debt, i.AnnualInterest } + + +//<Description> +// An issue with the severity **Blocker** cannot move to production, it must be fixed. +// +// An issue with a severity level **Critical** shouldn't move to production. +// It still can for business imperative needs, but at worst it must be fixed +// during the next iterations. 
+// +// An issue with a severity level **High** should be fixed quickly, but can wait until +// the next scheduled interval. +// +// The severity of an issue is either defined explicitely in the rule source code, +// either inferred from the issue *annual interest* and thresholds defined in the +// NDepend Project Properties > Issue and Debt. +//</Description> +]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Critical Rules Violated" Unit="rules" /> +failif count > 0 rules +from r in Rules where r.IsCritical && r.IsViolated() +select new { r, issues = r.Issues() } + +//<Description> +// The concept of critical rule is useful to pinpoint certain rules that +// should not be violated. +// +// A rule can be made critical just by checking the *Critical button* in the +// rule edition control and then saving the rule. +// +// This quality gate fails if any critical rule gets any violations. +// +// When no baseline is available, rules that rely on diff are not counted. +// If you observe that this quality gate count slightly decreases with no apparent reason, +// the reason is certainly that rules that rely on diff are not counted +// because the baseline is not defined. 
+//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Percentage Debt" Unit="%" /> +failif value > 30% +warnif value > 20% +let timeToDev = codeBase.EffortToDevelop() +let debt = Issues.Sum(i => i.Debt) +select 100d * debt.ToManDay() / timeToDev.ToManDay() + +// <Description> +// % Debt total is defined as a percentage on: +// +// • the estimated total effort to develop the code base +// +// • and the estimated total time to fix all issues (the Debt) +// +// Estimated total effort to develop the code base is inferred from +// # lines of code of the code base and from the +// *Estimated number of man-days to develop 1000 logical lines of code* +// setting found in NDepend Project Properties > Issue and Debt. +// +// Debt documentation: http://www.ndepend.com/docs/technical-debt#Debt +// +// This quality gate fails if the estimated debt is more than 30% +// of the estimated effort to develop the code base, and warns if the +// estimated debt is more than 20% of the estimated effort to develop +// the code base. +// </Description>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Debt" Unit="man-days" /> +failif value > 50 man-days +warnif value > 30 man-days +Issues.Sum(i => i.Debt).ToManDay() + +//<Description> +// This Quality Gate is disabled by default because the fail and warn +// thresholds of unacceptable Debt in man-days can only depend on the +// project size, number of developers and overall context. +// +// However you can refer to the default Quality Gate **Percentage Debt**. +// +// The Debt is defined as the sum of estimated effort to fix all issues. 
+// Debt documentation: http://www.ndepend.com/docs/technical-debt#Debt +//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="New Debt since Baseline" Unit="man-days" /> +failif value > 2 man-days +warnif value > 0 man-days +let debt = Issues.Sum(i => i.Debt) +let debtInBaseline = IssuesInBaseline.Sum(i => i.Debt) +select (debt - debtInBaseline).ToManDay() + + +//<Description> +// This Quality Gate fails if the estimated effort to fix new or worsened +// issues (what is called the *New Debt since Baseline*) is higher +// than 2 man-days. +// +// This Quality Gate warns if this estimated effort is positive. +// +// Debt documentation: http://www.ndepend.com/docs/technical-debt#Debt +//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Debt Rating per Namespace" Unit="namespaces" /> +failif count > 0 namespaces + +from n in Application.Namespaces +where n.DebtRating() != null && + n.DebtRating().Value.EqualsAny(DebtRating.E, DebtRating.D) +select new { + n, + debtRating = n.DebtRating(), + debtRatio = n.DebtRatio(), // % of debt from which DebtRating is inferred + devTimeInManDay = n.EffortToDevelop().ToDebt(), + debtInManDay = n.AllDebt(), + issues = n.AllIssues() +} + +// <Description> +// Forbid namespaces with a poor Debt Rating equals to **E** or **D**. +// +// The **Debt Rating** for a code element is estimated by the value of the **Debt Ratio** +// and from the various rating thresholds defined in this project *Debt Settings*. +// +// The **Debt Ratio** of a code element is a percentage of **Debt Amount** (in floating man-days) +// compared to the **estimated effort to develop the code element** (also in floating man-days). 
+// +// The **estimated effort to develop the code element** is inferred from the code elements +// number of lines of code, and from the project *Debt Settings* parameters +// *estimated number of man-days to develop 1000* **logical lines of code**. +// +// The **logical lines of code** corresponds to the number of debug breakpoints in a method +// and doesn't depend on code formatting nor comments. +// +// The Quality Gate can be modified to match assemblies, types or methods +// with a poor Debt Rating, instead of matching namespaces. +// </Description>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="Annual Interest" Unit="man-days" /> +failif value > 50 man-days +warnif value > 30 man-days +Issues.Sum(i => i.AnnualInterest).ToManDay() + + +//<Description> +// This Quality Gate is disabled per default because the fail and warn +// thresholds of unacceptable Annual-Interest in man-days can only depend +// on the project size, number of developers and overall context. +// +// However you can refer to the default Quality Gate +// **New Annual Interest since Baseline**. +// +// The Annual-Interest is defined as the sum of estimated annual cost +// in man-days, to leave all issues unfixed. +// +// Each rule can either provide a formula to compute the Annual-Interest +// per issue, or assign a **Severity** level for each issue. Some thresholds +// defined in *Project Properties > Issue and Debt > Annual Interest* are +// used to infer an Annual-Interest value from a Severity level. 
+// Annual Interest documentation: http://www.ndepend.com/docs/technical-debt#AnnualInterest +//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <QualityGate Name="New Annual Interest since Baseline" Unit="man-days" /> +failif value > 2 man-days +warnif value > 0 man-days +let ai = Issues.Sum(i => i.AnnualInterest) +let aiInBaseline = IssuesInBaseline.Sum(i => i.AnnualInterest) +select (ai - aiInBaseline).ToManDay() + +//<Description> +// This Quality Gate fails if the estimated annual cost to leave all issues +// unfixed, increased from more than 2 man-days since the baseline. +// +// This Quality Gate warns if this estimated annual cost is positive. +// +// This estimated annual cost is named the **Annual-Interest**. +// +// Each rule can either provide a formula to compute the Annual-Interest +// per issue, or assign a **Severity** level for each issue. Some thresholds +// defined in *Project Properties > Issue and Debt > Annual Interest* are +// used to infer an Annual-Interest value from a Severity level. 
+// Annual Interest documentation: http://www.ndepend.com/docs/technical-debt#AnnualInterest +//</Description>]]></Query> + </Group> + <Group Name="Hot Spots" Active="True" ShownInReport="True"> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Types Hot Spots</Name> +from t in JustMyCode.Types +where t.AllDebt() > Debt.Zero && + t.AllAnnualInterest() > AnnualInterest.Zero +orderby t.AllDebt().Value.TotalMinutes descending +select new { t, + Debt = t.AllDebt(), + Issues = t.AllIssues(), // AllIssues = {types issues} union {members issues} + AnnualInterest = t.AllAnnualInterest(), + BreakingPoint = t.AllBreakingPoint(), + t.NbLinesOfCode, + // t.PercentageCoverage, to uncomment if coverage data is imported + DebtRating = t.DebtRating(), + DebtRatio = t.DebtRatio() +} + +//<Description> +// This query lists **types with most Debt**, +// or in other words, types with issues that would need +// the largest effort to get fixed. +// +// Both issues on the type and its members are +// taken account. +// +// Since untested code often generates a lot of +// Debt, the type size and percentage coverage is shown +// (just uncomment *t.PercentageCoverage* in the query +// source code once you've imported the coverage data). +// +// The *Debt Rating* and *Debt Ratio* are also shown +// for informational purpose. +// +// -- +// +// The amount of *Debt* is not a measure to prioritize +// the effort to fix issues, it is an estimation of how far +// the team is from clean code that abides by the rules set. +// +// For each issue the *Annual Interest* estimates the annual +// cost to leave the issues unfixed. The *Severity* of an issue +// is estimated through thresholds from the *Annual Interest*. +// +// The **Debt Breaking Point** represents the duration +// from now when the estimated cost to leave the issue unfixed +// costs as much as the estimated effort to fix it. 
+// +// Hence the shorter the **Debt Breaking Point** +// the largest the **Return on Investment** for fixing +// the issue. The **Breaking Point is the right metric +// to prioritize issues fix**. +//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Types to Fix Priority</Name> +from t in JustMyCode.Types +where t.AllBreakingPoint() > TimeSpan.Zero && + t.AllDebt().Value > 30.ToMinutes() +orderby t.AllBreakingPoint().TotalMinutes ascending +select new { t, + BreakingPoint = t.AllBreakingPoint(), + Debt = t.AllDebt(), + AnnualInterest = t.AllAnnualInterest(), + Issues = t.AllIssues(), + t.NbLinesOfCode, + // t.PercentageCoverage, to uncomment if coverage data is imported + DebtRating = t.DebtRating(), + DebtRatio = t.DebtRatio() +} + +//<Description> +// This query lists types per increasing +// **Debt Breaking Point**. +// +// For each issue the *Debt* estimates the +// effort to fix the issue, and the *Annual Interest* +// estimates the annual cost to leave the issue unfixed. +// The *Severity* of an issue is estimated through +// thresholds from the *Annual Interest* of the issue. +// +// The **Debt Breaking Point** represents the duration +// from now when the estimated cost to leave the issue unfixed +// costs as much as the estimated effort to fix it. +// +// Hence the shorter the **Debt Breaking Point** +// the largest the **Return on Investment** for fixing +// the issues. +// +// Often new and refactored types since baseline will be +// listed first, because issues on these types get a +// higher *Annual Interest* because it is important to +// focus first on new issues. +// +// +// -- +// +// Both issues on the type and its members are +// taken account. 
+// +// Only types with at least 30 minutes of Debt are listed +// to avoid parasiting the list with the numerous +// types with small *Debt*, on which the *Breaking Point* +// value makes less sense. +// +// The *Annual Interest* estimates the cost per year +// in man-days to leave these issues unfixed. +// +// Since untested code often generates a lot of +// Debt, the type size and percentage coverage is shown +// (just uncomment *t.PercentageCoverage* in the query +// source code once you've imported the coverage data). +// +// The *Debt Rating* and *Debt Ratio* are also shown +// for informational purpose. +//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Issues to Fix Priority</Name> +from i in Issues +// Don't show first issues with BreakingPoint equals to zero. +orderby i.BreakingPoint != TimeSpan.Zero ? i.BreakingPoint : TimeSpan.MaxValue +select new { i, + Debt = i.Debt, + AnnualInterest = i.AnnualInterest, + BreakingPoint = i.BreakingPoint, + CodeElement = i.CodeElement +} + +//<Description> +// This query lists issues per increasing +// **Debt Breaking Point**. +// +// Double-click an issue to edit its rule and +// select the issue in the rule result. This way +// you can view all information concerning the issue. +// +// For each issue the *Debt* estimates the +// effort to fix the issue, and the *Annual Interest* +// estimates the annual cost to leave the issue unfixed. +// The *Severity* of an issue is estimated through +// thresholds from the *Annual Interest* of the issue. +// +// The **Debt Breaking Point** represents the duration +// from now when the estimated cost to leave the issue unfixed +// costs as much as the estimated effort to fix it. +// +// Hence the shorter the **Debt Breaking Point** +// the largest the **Return on Investment** for fixing +// the issue. 
+// +// Often issues on new and refactored code elements since +// baseline will be listed first, because such issues get a +// higher *Annual Interest* because it is important to +// focus first on new issues on recent code. +//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Debt and Issues per Rule</Name> +from r in Rules +where r.IsViolated() +orderby r.Debt().Value descending +select new { + r, + Issues = r.Issues(), + Debt = r.Debt(), + AnnualInterest = r.AnnualInterest(), + BreakingPoint = r.BreakingPoint(), + Category = r.Category +} + +//<Description> +// This query lists violated rules with most *Debt* first. +// +// A rule violated has issues. For each issue the *Debt* +// estimates the effort to fix the issue. +// +// -- +// +// The amount of *Debt* is not a measure to prioritize +// the effort to fix issues, it is an estimation of how far +// the team is from clean code that abides by the rules set. +// +// For each issue the *Annual Interest* estimates the annual +// cost to leave the issues unfixed. The *Severity* of an issue +// is estimated through thresholds from the *Annual Interest*. +// +// The **Debt Breaking Point** represents the duration +// from now when the estimated cost to leave the issue unfixed +// costs as much as the estimated effort to fix it. +// +// Hence the shorter the **Debt Breaking Point** +// the largest the **Return on Investment** for fixing +// the issue. The **Breaking Point is the right metric +// to prioritize issues fix**. +// +// -- +// +// Notice that rules can be grouped in *Rule Category*. This +// way you'll see categories that generate most *Debt*. +// +// Typically the rules that generate most *Debt* are the +// ones related to *Code Coverage by Tests*, *Architecture* +// and *Code Smells*. 
+//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>New Debt and Issues per Rule</Name> +from r in Rules +where r.IsViolated() && r.IssuesAdded().Count() > 0 +orderby r.DebtDiff().Value descending +select new { + r, + IssuesAdded = r.IssuesAdded(), + IssuesFixed = r.IssuesFixed(), + Issues = r.Issues(), + Debt = r.Debt(), + DebtDiff = r.DebtDiff(), + Category = r.Category +} + +//<Description> +// This query lists violated rules that have new issues +// since baseline, with most **new Debt** first. +// +// A rule violated has issues. For each issue the *Debt* +// estimates the effort to fix the issue. +// +// -- +// +// New issues since the baseline are consequence of recent code +// refactoring sessions. They represent good opportunities +// of fix because the code recently refactored is fresh in +// the developers mind, which means fixing now costs less +// than fixing later. +// +// Fixing issues on recently touched code is also a good way +// to foster practices that will lead to higher code quality +// and maintainability, including writing unit-tests +// and avoiding unnecessary complex code. +// +// -- +// +// Notice that rules can be grouped in *Rule Category*. This +// way you'll see categories that generate most *Debt*. +// +// Typically the rules that generate most *Debt* are the +// ones related to *Code Coverage by Tests*, *Architecture* +// and *Code Smells*. 
+//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Debt and Issues per Code Element</Name> +from elem in CodeElements +where elem.HasIssue() +orderby elem.Debt().Value descending +select new { + elem, + Issues = elem.Issues(), + Debt = elem.Debt(), + AnnualInterest = elem.AnnualInterest(), + BreakingPoint = elem.BreakingPoint() +} + +//<Description> +// This query lists code elements that have issues, +// with most *Debt* first. +// +// For each code element the *Debt* estimates +// the effort to fix the element issues. +// +// The amount of *Debt* is not a measure to prioritize +// the effort to fix issues, it is an estimation of how far +// the team is from clean code that abides by the rules set. +// +// For each element the *Annual Interest* estimates the annual +// cost to leave the elements issues unfixed. The *Severity* of an +// issue is estimated through thresholds from the *Annual Interest* +// of the issue. +// +// The **Debt Breaking Point** represents the duration +// from now when the estimated cost to leave the issues unfixed +// costs as much as the estimated effort to fix it. +// +// Hence the shorter the **Debt Breaking Point** +// the largest the **Return on Investment** for fixing +// the issue. The **Breaking Point is the right metric +// to prioritize issues fix**. 
+//</Description>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>New Debt and Issues per Code Element</Name> +from elem in CodeElements +where elem.HasIssue() && elem.IssuesAdded().Count() > 0 +orderby elem.DebtDiff().Value descending +select new { + elem, + IssuesAdded = elem.IssuesAdded(), + IssuesFixed = elem.IssuesFixed(), + Issues = elem.Issues(), + Debt = elem.Debt(), + DebtDiff = elem.DebtDiff() +} + //<Description> +// This query lists code elements that have new issues +// since baseline, with most **new Debt** first. +// +// For each code element the *Debt* estimates +// the effort to fix the element issues. +// +// New issues since the baseline are consequence of recent code +// refactoring sessions. They represent good opportunities +// of fix because the code recently refactored is fresh in +// the developers mind, which means fixing now costs less +// than fixing later. +// +// Fixing issues on recently touched code is also a good way +// to foster practices that will lead to higher code quality +// and maintainability, including writing unit-tests +// and avoiding unnecessary complex code. +//</Description> +]]></Query> + </Group> + <Group Name="Code Smells" Active="True" ShownInReport="False"> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="True"><![CDATA[// <Name>Avoid types too big</Name> +warnif count > 0 from t in JustMyCode.Types where + + // First filter on type to optimize + t.NbLinesOfCode > 200 + // # IL Instructions is commented, because with LINQ syntax, a few lines of code can compile to hundreds of IL instructions. 
+ // || t.NbILInstructions > 3000 + + // What matters is the # lines of code in JustMyCode + let locJustMyCode = t.MethodsAndContructors.Where(m => JustMyCode.Contains(m)).Sum(m => m.NbLinesOfCode) + where locJustMyCode > 200 + + let isStaticWithNoMutableState = (t.IsStatic && t.Fields.Any(f => !f.IsImmutable)) + let staticFactor = (isStaticWithNoMutableState ? 0.2 : 1) + + orderby locJustMyCode descending +select new { + t, + locJustMyCode, + t.NbILInstructions, + t.Methods, + t.Fields, + + Debt = (staticFactor*locJustMyCode.Linear(200, 1, 2000, 10)).ToHours().ToDebt(), + + // The annual interest varies linearly from interest for severity major for 300 loc + // to interest for severity critical for 2000 loc + AnnualInterest = staticFactor*(locJustMyCode.Linear( + 200, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 2000, Severity.Critical.AnnualInterestThreshold().Value.TotalMinutes)).ToMinutes().ToAnnualInterest() +} + +//<Description> +// This rule matches types with more than 200 lines of code. +// **Only lines of code in JustMyCode methods are taken account.** +// +// Types where *NbLinesOfCode > 200* are extremely complex +// to develop and maintain. +// See the definition of the NbLinesOfCode metric here +// http://www.ndepend.com/docs/code-metrics#NbLinesOfCode +// +// Maybe you are facing the **God Class** phenomenon: +// A **God Class** is a class that controls way too many other classes +// in the system and has grown beyond all logic to become +// *The Class That Does Everything*. +//</Description> + +//<HowToFix> +// Types with many lines of code +// should be split in a group of smaller types. +// +// To refactor a *God Class* you'll need patience, +// and you might even need to recreate everything from scratch. +// Here are a few refactoring advices: +// +// ⢠The logic in the *God Class* must be splitted in smaller classes. 
+// These smaller classes can eventually become private classes nested +// in the original *God Class*, whose instances objects become +// composed of instances of smaller nested classes. +// +// ⢠Smaller classes partitioning should be driven by the multiple +// responsibilities handled by the *God Class*. To identify these +// responsibilities it often helps to look for subsets of methods +// strongly coupled with subsets of fields. +// +// ⢠If the *God Class* contains way more logic than states, a good +// option can be to define one or several static classes that +// contains no static field but only pure static methods. A pure static +// method is a function that computes a result only from inputs +// parameters, it doesn't read nor assign any static or instance field. +// The main advantage of pure static methods is that they are easily +// testable. +// +// ⢠Try to maintain the interface of the *God Class* at first +// and delegate calls to the new extracted classes. +// In the end the *God Class* should be a pure facade without its own logic. +// Then you can keep it for convenience or throw it away and +// start to use the new classes only. +// +// ⢠Unit Tests can help: write tests for each method before extracting it +// to ensure you don't break functionality. +// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 1 hour for a 200 lines of code type, +// up to 10 hours for a type with 2.000 or more lines of code. +// +// In Debt and Interest computation, this rule takes account of the fact +// that static types with no mutable fields are just a collection of +// static methods that can be easily splitted and moved from one type +// to another. 
+//</HowToFix>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid types with too many methods</Name> +warnif count > 0 from t in JustMyCode.Types + + // Optimization: Fast discard of non-relevant types + where t.Methods.Count() > 20 + + // Don't match these methods + let methods = t.Methods.Where( + m => !(m.IsGeneratedByCompiler || + m.IsConstructor || m.IsClassConstructor || + m.IsPropertyGetter || m.IsPropertySetter || + m.IsEventAdder || m.IsEventRemover)) + + where methods.Count() > 20 + orderby methods.Count() descending + + let isStaticWithNoMutableState = (t.IsStatic && t.Fields.Any(f => !f.IsImmutable)) + let staticFactor = (isStaticWithNoMutableState ? 0.2 : 1) + +select new { + t, + nbMethods = methods.Count(), + instanceMethods = methods.Where(m => !m.IsStatic), + staticMethods = methods.Where(m => m.IsStatic), + + t.NbLinesOfCode, + + Debt = (staticFactor*methods.Count().Linear(20, 1, 200, 10)).ToHours().ToDebt(), + + // The annual interest varies linearly from interest for severity major for 30 methods + // to interest for severity critical for 200 methods + AnnualInterest = (staticFactor*methods.Count().Linear( + 20, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 200, Severity.Critical.AnnualInterestThreshold().Value.TotalMinutes)).ToMinutes().ToAnnualInterest() +} + +//<Description> +// This rule matches types with more than 20 methods. +// Such type might be hard to understand and maintain. +// +// Notice that methods like constructors or property +// and event accessors are not taken account. +// +// Having many methods for a type might be a symptom +// of too many responsibilities implemented. +// +// Maybe you are facing the **God Class** phenomenon: +// A **God Class** is a class that controls way too many other classes +// in the system and has grown beyond all logic to become +// *The Class That Does Everything*. 
+//</Description> + +//<HowToFix> +// To refactor properly a *God Class* please read *HowToFix advices* +// from the default rule **Types to Big**. +//// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 1 hour for a type with 20 methods, +// up to 10 hours for a type with 200 or more methods. +// +// In Debt and Interest computation, this rule takes account of the fact +// that static types with no mutable fields are just a collection of +// static methods that can be easily splitted and moved from one type +// to another. +//</HowToFix>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid types with too many fields</Name> +warnif count > 0 from t in JustMyCode.Types + + // Optimization: Fast discard of non-relevant types + where !t.IsEnumeration && + t.Fields.Count() > 15 + + // Count instance fields and non-constant static fields + let fields = t.Fields.Where(f => + !f.IsGeneratedByCompiler && + !f.IsLiteral && + !(f.IsStatic && f.IsInitOnly) && + JustMyCode.Contains(f) ) + + where fields.Count() > 15 + + let methodsAssigningFields = fields.SelectMany(f => f.MethodsAssigningMe) + + orderby fields.Count() descending +select new { + t, + instanceFields = fields.Where(f => !f.IsStatic), + staticFields = fields.Where(f => f.IsStatic), +methodsAssigningFields , + + // See definition of Size of Instances metric here: + // http://www.ndepend.com/docs/code-metrics#SizeOfInst + t.SizeOfInst, + + Debt = fields.Count().Linear(15, 1, 200, 10).ToHours().ToDebt(), + + // The annual interest varies linearly from interest for severity major for 30 methods + // to interest for severity critical for 200 methods + AnnualInterest = fields.Count().Linear(15, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 200, Severity.Critical.AnnualInterestThreshold().Value.TotalMinutes).ToMinutes().ToAnnualInterest() +} + +//<Description> +// 
This rule matches types with more than 15 fields. +// Such type might be hard to understand and maintain. +// +// Notice that constant fields and static-readonly fields are not counted. +// Enumerations types are not counted also. +// +// Having many fields for a type might be a symptom +// of too many responsibilities implemented. +//</Description> + +//<HowToFix> +// To refactor such type and increase code quality and maintainability, +// certainly you'll have to group subsets of fields into smaller types +// and dispatch the logic implemented into the methods +// into these smaller types. +// +// More refactoring advices can be found in the default rule +// **Types to Big**, *HowToFix* section. +// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 1 hour for a type with 15 fields, +// to up to 10 hours for a type with 200 or more fields. +//</HowToFix>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="True"><![CDATA[// <Name>Avoid methods too big, too complex</Name> +warnif count > 0 from m in JustMyCode.Methods where + m.ILNestingDepth > 2 && + (m.NbLinesOfCode > 35 || + m.CyclomaticComplexity > 20 || + m.ILCyclomaticComplexity > 60) + + let complexityScore = m.NbLinesOfCode/2 + m.CyclomaticComplexity + m.ILCyclomaticComplexity/3 + 3*m.ILNestingDepth + + orderby complexityScore descending, + m.CyclomaticComplexity descending, + m.ILCyclomaticComplexity descending, + m.ILNestingDepth descending +select new { + m, + m.NbLinesOfCode, + m.CyclomaticComplexity, + m.ILCyclomaticComplexity, + m.ILNestingDepth, + complexityScore, + + Debt = complexityScore.Linear(30, 40, 400, 8*60).ToMinutes().ToDebt(), + + // The annual interest varies linearly from interest for severity minor + // to interest for severity major + AnnualInterest = complexityScore .Linear(30, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 200, 
2*(Severity.High.AnnualInterestThreshold().Value.TotalMinutes)).ToMinutes().ToAnnualInterest() + +} + +//<Description> +// This rule matches methods where *ILNestingDepth* > 2 +// and (*NbLinesOfCode* > 35 +// or *CyclomaticComplexity* > 20 +// or *ILCyclomaticComplexity* > 60) +// Such method is typically hard to understand and maintain. +// +// Maybe you are facing the **God Method** phenomenon. +// A "God Method" is a method that does way too many processes in the system +// and has grown beyond all logic to become *The Method That Does Everything*. +// When need for new processes increases suddenly some programmers realize: +// why should I create a new method for each processe if I can only add an *if*. +// +// See the definition of the *CyclomaticComplexity* metric here: +// http://www.ndepend.com/docs/code-metrics#CC +// +// See the definition of the *ILCyclomaticComplexity* metric here: +// http://www.ndepend.com/docs/code-metrics#ILCC +// +// See the definition of the *ILNestingDepth* metric here: +// http://www.ndepend.com/docs/code-metrics#ILNestingDepth +//</Description> + +//<HowToFix> +// A large and complex method should be split in smaller methods, +// or even one or several classes can be created for that. +// +// During this process it is important to question the scope of each +// variable local to the method. This can be an indication if +// such local variable will become an instance field of the newly created class(es). +// +// Large *switchâ¦case* structures might be refactored through the help +// of a set of types that implement a common interface, the interface polymorphism +// playing the role of the *switch cases tests*. +// +// Unit Tests can help: write tests for each method before extracting it +// to ensure you don't break functionality. +// +// The estimated Debt, which means the effort to fix such issue, +// varies from 40 minutes to 8 hours, linearly from a weighted complexity score. 
+//</HowToFix>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="True"><![CDATA[// <Name>Avoid methods with too many parameters</Name> +warnif count > 0 from m in JustMyCode.Methods where + m.NbParameters >= 7 + orderby m.NbParameters descending +select new { + m, + m.NbParameters, + + Debt = m.NbParameters.Linear(7, 1, 40, 6).ToHours().ToDebt(), + + // The annual interest varies linearly from interest for severity Medium for 7 parameters + // to interest for severity Critical for 40 parameters + AnnualInterest = m.NbParameters.Linear(7, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 40, Severity.Critical.AnnualInterestThreshold().Value.TotalMinutes).ToMinutes().ToAnnualInterest() +} + +//<Description> +// This rule matches methods with more than 8 parameters. +// Such method is painful to call and might degrade performance. +// See the definition of the *NbParameters* metric here: +// http://www.ndepend.com/docs/code-metrics#NbParameters +//</Description> + +//<HowToFix> +// More properties/fields can be added to the declaring type to +// handle numerous states. An alternative is to provide +// a class or a structure dedicated to handle arguments passing. +// For example see the class *System.Diagnostics.ProcessStartInfo* +// and the method *System.Diagnostics.Process.Start(ProcessStartInfo)*. +// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 1 hour for a method with 7 parameters, +// up to 6 hours for a methods with 40 or more parameters. 
+//</HowToFix>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid methods with too many local variables</Name> +warnif count > 0 from m in JustMyCode.Methods where + m.NbVariables > 15 + orderby m.NbVariables descending +select new { + m, + m.NbVariables, + + Debt = m.NbVariables.Linear(15, 1, 80, 6).ToHours().ToDebt(), + + // The annual interest varies linearly from interest for severity Medium for 15 variables + // to interest for severity Critical for 80 variables + AnnualInterest = m.NbVariables.Linear(15, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 80, Severity.Critical.AnnualInterestThreshold().Value.TotalMinutes).ToMinutes().ToAnnualInterest() + +} + +//<Description> +// This rule matches methods with more than 15 variables. +// +// Methods where *NbVariables > 8* are hard to understand and maintain. +// Methods where *NbVariables > 15* are extremely complex and must be refactored. +// +// See the definition of the *Nbvariables* metric here: +// http://www.ndepend.com/docs/code-metrics#Nbvariables +//</Description> + +//<HowToFix> +// To refactor such method and increase code quality and maintainability, +// certainly you'll have to split the method into several smaller methods +// or even create one or several classes to implement the logic. +// +// During this process it is important to question the scope of each +// variable local to the method. This can be an indication if +// such local variable will become an instance field of the newly created class(es). +// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 10 minutes for a method with 15 variables, +// up to 2 hours for a methods with 80 or more variables. 
+//</HowToFix>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid methods with too many overloads</Name> +warnif count > 0 + +let lookup = JustMyCode.Methods.Where(m => + m.NbOverloads >= 6 && + !m.IsOperator && // Don't report operator overload + + // Don't match overloads due tu the visitor pattern, based on a naming convention. + !m.SimpleName.ToLower().StartsWithAny("visit", "dispatch") +).ToLookup(m => m.ParentType.FullName + "."+ m.SimpleName) + +from @group in lookup +let overloads = @group.ToArray() +orderby overloads.Length descending + +select new { + m = @group.First(), + overloads, + Debt = (3*overloads.Length).ToMinutes().ToDebt(), + Severity = Severity.Medium +} + +//<Description> +// Method overloading is the ability to create multiple methods of the same name +// with different implementations, and various set of parameters. +// +// This rule matches sets of methods with 6 overloads or more. +// +// Such method set might be a problem to maintain +// and provokes coupling higher than necessary. +// +// See the definition of the *NbOverloads* metric here +// http://www.ndepend.com/docs/code-metrics#NbOverloads +//</Description> + +//<HowToFix> +// Typically the *too many overloads* phenomenon appears when an algorithm +// takes a various set of in-parameters. Each overload is presented as +// a facility to provide a various set of in-parameters. +// In such situation, the C# and VB.NET language feature named +// *Named and Optional arguments* should be used. +// +// The *too many overloads* phenomenon can also be a consequence of the usage +// of the **visitor design pattern** http://en.wikipedia.org/wiki/Visitor_pattern +// since a method named *Visit()* must be provided for each sub type. 
+// For this reason, the default version of this rule doesn't match overloads whose name +// start with "visit" or "dispatch" (case-unsensitive) to avoid match +// overload visitors, and you can adapt this rule to your own naming convention. +// +// Sometime *too many overloads* phenomenon is not the symptom of a problem, +// for example when a *numeric to something conversion* method applies to +// all numeric and nullable numeric types. +// +// The estimated Debt, which means the effort to fix such issue, +// is of 3 minutes per method overload. +//</HowToFix>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid methods potentially poorly commented</Name> +warnif count > 0 from m in JustMyCode.Methods where + m.PercentageComment < 10 && + m.NbLinesOfCode > 20 + + let nbLinesOfCodeNotCommented = m.NbLinesOfCode - m.NbLinesOfComment + + orderby nbLinesOfCodeNotCommented descending + +select new { + m, + m.PercentageComment, + m.NbLinesOfCode, + m.NbLinesOfComment, + nbLinesOfCodeNotCommented, + + Debt = nbLinesOfCodeNotCommented .Linear(20, 2, 200, 20).ToMinutes().ToDebt(), + + // The annual interest varies linearly from interest for severity major for 300 loc + // to interest for severity critical for 2000 loc + AnnualInterest = m.PercentageComment.Linear( + 0, 8 *(Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes), + 20, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes).ToMinutes().ToAnnualInterest() +} + +//<Description> +// This rule matches methods with less than 10% of comment lines and that have +// at least 20 lines of code. Such method might need to be more commented. 
+// +// See the definitions of the *Comments metric* here: +// http://www.ndepend.com/docs/code-metrics#PercentageComment +// http://www.ndepend.com/docs/code-metrics#NbLinesOfComment +// +// Notice that only comments about the method implementation +// (comments in method body) are taken account. +//</Description> + +//<HowToFix> +// Typically add more comment. But code commenting is subject to controversy. +// While poorly written and designed code would needs a lot of comment +// to be understood, clean code doesn't need that much comment, especially +// if variables and methods are properly named and convey enough information. +// Unit-Test code can also play the role of code commenting. +// +// However, even when writing clean and well-tested code, one will have +// to write **hacks** at a point, usually to circumvent some API limitations or bugs. +// A hack is a non-trivial piece of code, that doesn't make sense at first glance, +// and that took time and web research to be found. +// In such situation comments must absolutely be used to express the intention, +// the need for the hacks and the source where the solution has been found. +// +// The estimated Debt, which means the effort to comment such method, +// varies linearly from 2 minutes for 10 lines of code not commented, +// up to 20 minutes for 200 or more, lines of code not commented. 
+//</HowToFix>]]></Query> + <Query Active="False" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid types with poor cohesion</Name> +warnif count > 0 from t in JustMyCode.Types where + t.LCOM > 0.8 && + t.NbFields > 10 && + t.NbMethods >10 + + let poorCohesionScore = 1/(1.01 - t.LCOM) + orderby poorCohesionScore descending + + select new { + t, + t.LCOM, + t.NbMethods, + t.NbFields, + poorCohesionScore, + + Debt = poorCohesionScore.Linear(5, 5, 50, 4*60).ToMinutes().ToDebt(), + + // The annual interest varies linearly from interest for severity Medium for low poorCohesionScore + // to 4 times interest for severity High for high poorCohesionScore + AnnualInterest = poorCohesionScore.Linear(5, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 50, 4*(Severity.High.AnnualInterestThreshold().Value.TotalMinutes)).ToMinutes().ToAnnualInterest() + +} + +//<Description> +// This rule is based on the *LCOM code metric*, +// LCOM stands for **Lack Of Cohesion of Methods**. +// See the definition of the LCOM metric here +// http://www.ndepend.com/docs/code-metrics#LCOM +// +// The LCOM metric measures the fact that most methods are using most fields. +// A class is considered utterly cohesive (which is good) +// if all its methods use all its instance fields. +// +// Only types with enough methods and fields are taken account to avoid bias. +// The LCOM takes its values in the range [0-1]. +// +// This rule matches types with LCOM higher than 0.8. +// Such value generally pinpoints a **poorly cohesive class**. +//</Description> + +//<HowToFix> +// To refactor a poorly cohesive type and increase code quality and maintainability, +// certainly you'll have to split the type into several smaller and more cohesive types +// that together, implement the same logic. 
+// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 5 minutes for a type with a low poorCohesionScore, +// up to 4 hours for a type with high poorCohesionScore. +//</HowToFix>]]></Query> + </Group> + <Group Name="Code Smells Regression" Active="True" ShownInReport="False"> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>From now, all types added should respect basic quality principles</Name> +warnif count > 0 from t in JustMyCode.Types where + +// Only match types added since Baseline. +// Uncomment this line to match also refactored types since Baseline. +// (t.WasAdded() || t.CodeWasChanged()) && + t.WasAdded() && + +// Eliminate interfaces, enumerations or types only with constant fields +// by making sure we are matching type with code. +t.NbLinesOfCode > 10 && + +// Optimization: Fast discard of non-relevant types +(t.Fields.Count() > 20 || t.Methods.Count() > 20) + +// Count instance fields and non-constant static fields +let fields = t.Fields.Where(f => + !f.IsLiteral && + !(f.IsStatic && f.IsInitOnly)) + +// Don't match these methods +let methods = t.Methods.Where( + m => !(m.IsConstructor || m.IsClassConstructor || + m.IsGeneratedByCompiler || + m.IsPropertyGetter || m.IsPropertySetter || + m.IsEventAdder || m.IsEventRemover)) + +where + +// Low Quality types Metrics' definitions are available here: +// http://www.ndepend.com/docs/code-metrics#MetricsOnTypes +( // Types with too many methods + fields.Count() > 20 || + + methods.Count() > 20 || + + // Complex Types that use more than 50 other types + t.NbTypesUsed > 50 +) +select new { + t, + t.NbLinesOfCode, + + instanceMethods = methods.Where(m => !m.IsStatic), + staticMethods = methods.Where(m => m.IsStatic), + + instanceFields = fields.Where(f => !f.IsStatic), + staticFields = fields.Where(f => f.IsStatic), + + t.TypesUsed, + + // Constant Debt estimation, since for such type 
rules in category "Code Smells" + // accurately estimate the Debt. + Debt = 10.ToMinutes().ToDebt(), + + // The Severity is higher for new types than for refactored types + AnnualInterest= (t.WasAdded() ? 3 : 1) * + Severity.High.AnnualInterestThreshold() +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is defined (*diff mode*). +// This rule operates only on types added since baseline. +// +// This rule can be easily modified to also match types refactored since baseline, +// that don't satisfy all quality criterions. +// +// Types matched by this rule not only have been recently added or refactored, +// but also somehow violate one or several basic quality principles, +// whether it has too many methods, +// it has too many fields, +// or is using too many types. +// Any of these criterions is often a symptom of a type with too many responsibilities. +// +// Notice that to count methods and fields, methods like constructors +// or property and event accessors are not taken account. +// Notice that constants fields and static-readonly fields are not counted. +// Enumerations types are not counted also. +//</Description> + +//<HowToFix> +// To refactor such type and increase code quality and maintainability, +// certainly you'll have to split the type into several smaller types +// that together, implement the same logic. +// +// Issues of this rule have a constant 10 minutes Debt, because the Debt, +// which means the effort to fix such issue, is already estimated for issues +// of rules in the category **Code Smells**. +// +// However issues of this rule have a **High** severity, with even more +// interests for issues on new types since baseline, because the proper time +// to increase the quality of these types is **now**, before they get commited +// in the next production release. 
+//</HowToFix>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>From now, all types added should be 100% covered by tests</Name> +warnif count > 0 from t in JustMyCode.Types where + +// Only match types added since Baseline. +// Uncomment this line to match also refactored types since Baseline. +// (t.WasAdded() || t.CodeWasChanged()) && + t.WasAdded() && + + // â¦that are not 100% covered by tests + t.PercentageCoverage < 100 + + let methodsCulprit = t.Methods.Where(m => m.PercentageCoverage < 100) + +select new { + t, + t.PercentageCoverage, + methodsCulprit, + t.NbLinesOfCode, + + // Constant Debt estimation, since for such type rules in category "Coverage" + // accurately estimate the untested code Debt. + Debt = 10.ToMinutes().ToDebt(), + + // The Severity is higher for new types than for refactored types + AnnualInterest= (t.WasAdded() ? 3 : 1) * + Severity.High.AnnualInterestThreshold() +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is defined (*diff mode*). +// This rule operates only on types added since baseline. +// +// This rule can be easily modified to also match types refactored since baseline, +// that are not 100% covered by tests. +// +// This rule is executed only if some code coverage data is imported +// from some code coverage files. +// +// Often covering 10% of remaining uncovered code of a class, +// requires as much work as covering the first 90%. +// For this reason, typically teams estimate that 90% coverage is enough. +// However *untestable code* usually means *poorly written code* +// which usually leads to *error prone code*. +// So it might be worth refactoring and making sure to cover the 10% remaining code +// because **most tricky bugs might come from this small portion of hard-to-test code**. 
+// +// Not all classes should be 100% covered by tests (like UI code can be hard to test) +// but you should make sure that most of the logic of your application +// is defined in some *easy-to-test classes*, 100% covered by tests. +// +// In this context, this rule warns when a type added or refactored since the baseline, +// is not fully covered by tests. +//</Description> + +//<HowToFix> +// Write more unit-tests dedicated to cover code not covered yet. +// If you find some *hard-to-test code*, it is certainly a sign that this code +// is not *well designed* and hence, needs refactoring. +// +// You'll find code impossible to cover by unit-tests, like calls to *MessageBox.Show()*. +// An infrastructure must be defined to be able to *mock* such code at test-time. +// +// Issues of this rule have a constant 10 minutes Debt, because the Debt, +// which means the effort to write tests for the culprit type, is already +// estimated for issues in the category **Code Coverage**. +// +// However issues of this rule have a **High** severity, with even more +// interests for issues on new types since baseline, because the proper time +// to write tests for these types is **now**, before they get commited +// in the next production release. +//</HowToFix>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>From now, all methods added should respect basic quality principles</Name> +warnif count > 0 from m in JustMyCode.Methods where + +// Only match methods added since Baseline. +// Uncomment this line to match also refactored methods since Baseline. 
+// (m.WasAdded() || m.CodeWasChanged()) && + m.WasAdded() && + +// Low Quality methods// Metrics' definitions +( m.NbLinesOfCode > 30 || // http://www.ndepend.com/docs/code-metrics#NbLinesOfCode + m.NbILInstructions > 200 || // http://www.ndepend.com/docs/code-metrics#NbILInstructions + m.CyclomaticComplexity > 20 || // http://www.ndepend.com/docs/code-metrics#CC + m.ILCyclomaticComplexity > 50 || // http://www.ndepend.com/docs/code-metrics#ILCC + m.ILNestingDepth > 4 || // http://www.ndepend.com/docs/code-metrics#ILNestingDepth + m.NbParameters > 5 || // http://www.ndepend.com/docs/code-metrics#NbParameters + m.NbVariables > 8 || // http://www.ndepend.com/docs/code-metrics#NbVariables + m.NbOverloads > 6 ) +select new { + m, + m.NbLinesOfCode, + m.NbILInstructions, + m.CyclomaticComplexity, + m.ILCyclomaticComplexity, + m.ILNestingDepth, + m.NbParameters, + m.NbVariables, + m.NbOverloads, // http://www.ndepend.com/docs/code-metrics#NbOverloads + + // Constant Debt estimation, since for such method rules in category "Code Smells" + // accurately estimate the Debt. + Debt = 5.ToMinutes().ToDebt(), + + // The Severity is higher for new methods than for refactored methods + AnnualInterest= (m.WasAdded() ? 3 : 1) * + Severity.High.AnnualInterestThreshold() +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is defined (*diff mode*). +// This rule operates only on methods added or refactored since the baseline. +// +// This rule can be easily modified to also match methods refactored since baseline, +// that don't satisfy all quality criterions. +// +// Methods matched by this rule not only have been recently added or refactored, +// but also somehow violate one or several basic quality principles, +// whether it is too large (too many *lines of code*), +// too complex (too many *if*, *switch case*, loopsâ¦) +// has too many variables, too many parameters +// or has too many overloads. 
+//</Description> + +//<HowToFix> +// To refactor such method and increase code quality and maintainability, +// certainly you'll have to split the method into several smaller methods +// or even create one or several classes to implement the logic. +// +// During this process it is important to question the scope of each +// variable local to the method. This can be an indication if +// such local variable will become an instance field of the newly created class(es). +// +// Large *switchâ¦case* structures might be refactored through the help +// of a set of types that implement a common interface, the interface polymorphism +// playing the role of the *switch cases tests*. +// +// Unit Tests can help: write tests for each method before extracting it +// to ensure you don't break functionality. +// +// Issues of this rule have a constant 5 minutes Debt, because the Debt, +// which means the effort to fix such issue, is already estimated for issues +// of rules in the category **Code Smells**. +// +// However issues of this rule have a **High** severity, with even more +// interests for issues on new methods since baseline, because the proper time +// to increase the quality of these methods is **now**, before they get commited +// in the next production release. 
+//</HowToFix>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid decreasing code coverage by tests of types</Name> +warnif count > 0 +from t in JustMyCode.Types where + t.IsPresentInBothBuilds() && t.CoverageDataAvailable && t.OlderVersion().CoverageDataAvailable +let locDiff = (int)t.NbLinesOfCode.Value - (int)t.OlderVersion().NbLinesOfCode.Value +where locDiff >= 0 +let uncoveredLoc = (int)t.NbLinesOfCodeNotCovered.Value - ((int)t.OlderVersion().NbLinesOfCodeNotCovered.Value + locDiff) +where uncoveredLoc > 0 + +orderby uncoveredLoc descending + +select new { + t, + OldCoveragePercent = t.OlderVersion().PercentageCoverage, + NewCoveragePercent = t.PercentageCoverage, + OldLoc = t.OlderVersion().NbLinesOfCode, + NewLoc = t.NbLinesOfCode, + uncoveredLoc, + + Debt = uncoveredLoc.Linear(1, 15, 100, 3*60).ToMinutes().ToDebt(), + + // The annual interest varies linearly from interest for severity High for one line of code that is not covered by tests anymore + // to interest for severity Critical for 50 lines of code that are not covered by tests anymore + AnnualInterest = uncoveredLoc.Linear(1, Severity.High.AnnualInterestThreshold().Value.TotalMinutes, + 50, 2*Severity.Critical.AnnualInterestThreshold().Value.TotalMinutes).ToMinutes().ToAnnualInterest() + + +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is defined (*diff mode*). +// +// This rule is executed only if some code coverage data is imported +// from some code coverage files. +// +// This rule warns when the number of lines of a type covered by tests +// decreased since the baseline. In case the type faced some refactoring +// since the baseline, this loss in coverage is estimated only for types +// with more lines of code, where # lines of code covered now is lower +// than # lines of code covered in baseline + the extra number of +// lines of code. 
+// +// Such situation can mean that some tests have been removed +// but more often, this means that the type has been modified, +// and that changes haven't been covered properly by tests. +// +// To visualize changes in code, right-click a matched type and select: +// +// ⢠Compare older and newer versions of source file +// +// ⢠or Compare older and newer versions disassembled with Reflector +//</Description> + +//<HowToFix> +// Write more unit-tests dedicated to cover changes in matched types +// not covered yet. +// If you find some *hard-to-test code*, it is certainly a sign that this code +// is not *well designed* and hence, needs refactoring. +// +// The estimated Debt, which means the effort to cover by test +// code that used to be covered, varies linearly 15 minutes to 3 hours, +// depending on the number of lines of code that are not covered by tests anymore. +// +// Severity of issues of this rule varies from **High** to **Critical** +// depending on the number of lines of code that are not covered by tests anymore. +// Because the loss in code coverage happened since the baseline, +// the severity is high because it is important to focus on these issues +// **now**, before such code gets released in production. 
+//</HowToFix>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid making complex methods even more complex</Name> +warnif count > 0 + +let complexityScoreProc = new Func<IMethod, double>(m => + (m.CyclomaticComplexity + m.ILCyclomaticComplexity/3 + 5*m.ILNestingDepth).Value) + +from m in JustMyCode.Methods where + !m.IsAbstract && + m.IsPresentInBothBuilds() && + m.CodeWasChanged() && + m.OlderVersion().CyclomaticComplexity > 6 + +let complexityScore = complexityScoreProc(m) +let oldComplexityScore = complexityScoreProc(m.OlderVersion()) +where complexityScore > oldComplexityScore + +let complexityScoreDiff = complexityScoreProc(m) - complexityScoreProc(m.OlderVersion()) +orderby complexityScoreDiff descending + +select new { + m, + oldComplexityScore , + complexityScore , + diff= complexityScoreDiff, + + Debt = complexityScoreDiff.Linear(1, 15, 50, 60).ToMinutes().ToDebt(), + + // The annual interest varies linearly from interest for severity Medium for a tiny complexity increment + // to interest for severity critical for 2000 loc + AnnualInterest = complexityScoreDiff.Linear(1, Severity.High.AnnualInterestThreshold().Value.TotalMinutes, + 50, 4*(Severity.High.AnnualInterestThreshold().Value.TotalMinutes)).ToMinutes().ToAnnualInterest() + +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is defined (*diff mode*). +// +// The method complexity is measured through the code metric +// *Cyclomatic Complexity* defined here: +// http://www.ndepend.com/docs/code-metrics#CC +// +// This rule warns when a method already complex +// (i.e with *Cyclomatic Complexity* higher than 6) +// become even more complex since the baseline. 
+// +// This rule needs assemblies PDB files and source code +// to be available at analysis time, because the *Cyclomatic Complexity* +// is inferred from the source code and source code location +// is inferred from PDB files. See: +// http://www.ndepend.com/docs/ndepend-analysis-inputs-explanation +// +// To visualize changes in code, right-click a matched method and select: +// +// ⢠Compare older and newer versions of source file +// +// ⢠or Compare older and newer versions disassembled with Reflector +//</Description> + +//<HowToFix> +// A large and complex method should be split in smaller methods, +// or even one or several classes can be created for that. +// +// During this process it is important to question the scope of each +// variable local to the method. This can be an indication if +// such local variable will become an instance field of the newly created class(es). +// +// Large *switchâ¦case* structures might be refactored through the help +// of a set of types that implement a common interface, the interface polymorphism +// playing the role of the *switch cases tests*. +// +// Unit Tests can help: write tests for each method before extracting it +// to ensure you don't break functionality. +// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 15 to 60 minutes depending on the extra complexity added. +// +// Issues of this rule have a **High** severity, because it is important to focus +// on these issues **now**, before such code gets released in production. +//</HowToFix>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid making large methods even larger</Name> + +warnif count > 0 +from m in JustMyCode.Methods where + !m.IsAbstract && + + // Eliminate constructors from match, since they get larger + // as soons as some fields initialization are added. 
+ !m.IsConstructor && + !m.IsClassConstructor && + + // Filter just here for optimization + m.NbLinesOfCode > 15 && + + m.IsPresentInBothBuilds() && + m.CodeWasChanged() + +let oldLoc = m.OlderVersion().NbLinesOfCode +where oldLoc > 15 && m.NbLinesOfCode > oldLoc + +let diff = m.NbLinesOfCode - oldLoc +where diff > 0 +orderby diff descending + +select new { + m, + oldLoc, + newLoc = m.NbLinesOfCode, + diff, + + Debt = diff.Linear(1, 10, 100, 60).ToMinutes().ToDebt(), + + // The annual interest varies linearly from interest for severity Medium for a tiny complexity increment + // to interest for severity critical for 2000 loc + AnnualInterest = diff .Linear(1, Severity.High.AnnualInterestThreshold().Value.TotalMinutes, + 100, 4*(Severity.High.AnnualInterestThreshold().Value.TotalMinutes)).ToMinutes().ToAnnualInterest() + +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is defined (*diff mode*). +// +// This rule warns when a method already large +// (i.e with more than 15 lines of code) +// become even larger since the baseline. +// +// The method size is measured through the code metric +// *# Lines of Code* defined here: +// http://www.ndepend.com/docs/code-metrics#NbLinesOfCode +// +// This rule needs assemblies PDB files +// to be available at analysis time, because the *# Lines of Code* +// is inferred from PDB files. See: +// http://www.ndepend.com/docs/ndepend-analysis-inputs-explanation +// +// To visualize changes in code, right-click a matched method and select: +// +// ⢠Compare older and newer versions of source file +// +// ⢠or Compare older and newer versions disassembled with Reflector +//</Description> + +//<HowToFix> +// Usually too big methods should be split in smaller methods. +// +// But long methods with no branch conditions, that typically initialize some data, +// are not necessarily a problem to maintain, and might not need refactoring. 
+// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 5 to 20 minutes depending +// on the number of lines of code added. +// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 10 to 60 minutes depending on the extra complexity added. +// +// Issues of this rule have a **High** severity, because it is important to focus +// on these issues **now**, before such code gets released in production. +//</HowToFix>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid adding methods to a type that already had many methods</Name> + +warnif count > 0 + +// Don't count constructors and methods generated by the compiler! +let getMethodsProc = new Func<IType, IList<IMethod>>( + t => t.Methods.Where(m => + !m.IsConstructor && !m.IsClassConstructor && + !m.IsGeneratedByCompiler).ToArray()) + + +from t in JustMyCode.Types where + + t.NbMethods > 30 && // Just here for optimization + + t.IsPresentInBothBuilds() + + // Optimization: fast discard of non-relevant types + where t.OlderVersion().NbMethods > 30 + + let oldMethods = getMethodsProc(t.OlderVersion()) + where oldMethods.Count > 30 + + let newMethods = getMethodsProc(t) + where newMethods.Count > oldMethods.Count + + let addedMethods = newMethods.Where(m => m.WasAdded()) + let removedMethods = oldMethods.Where(m => m.WasRemoved()) + + orderby addedMethods.Count() descending + +select new { + t, + nbOldMethods = oldMethods.Count, + nbNewMethods = newMethods.Count, + addedMethods, + removedMethods, + + Debt = (10*addedMethods.Count()).ToMinutes().ToDebt(), + AnnualInterest = addedMethods.Count().Linear( + 1, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 100, 4*(Severity.High.AnnualInterestThreshold().Value.TotalMinutes)).ToMinutes().ToAnnualInterest() +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is 
defined (*diff mode*). +// +// Types where number of methods is greater than 15 +// might be hard to understand and maintain. +// +// This rule lists types that already had more than 15 methods +// at the baseline time, and for which new methods have been added. +// +// Having many methods for a type might be a symptom +// of too many responsibilities implemented. +// +// Notice that constructors and methods generated by the compiler +// are not taken account. +//</Description> + +//<HowToFix> +// To refactor such type and increase code quality and maintainability, +// certainly you'll have to split the type into several smaller types +// that together, implement the same logic. +// +// The estimated Debt, which means the effort to fix such issue, +// is equal to 10 minutes per method added. +// +// Issues of this rule have a **High** severity, because it is important to focus +// on these issues **now**, before such code gets released in production. +//</HowToFix>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid adding instance fields to a type that already had many instance fields</Name> + +warnif count > 0 + +let getFieldsProc = new Func<IType, IList<IField>>( + t => t.Fields.Where(f => + !f.IsLiteral && + !f.IsGeneratedByCompiler && + !f.IsStatic).ToArray()) + + +from t in JustMyCode.Types where + + !t.IsEnumeration && + t.IsPresentInBothBuilds() + + // Optimization: fast discard of non-relevant types + where t.OlderVersion().NbFields > 15 + + let oldFields = getFieldsProc(t.OlderVersion()) + where oldFields.Count > 15 + + let newFields = getFieldsProc(t) + where newFields.Count > oldFields.Count + + let addedFields = newFields.Where(f => f.WasAdded()) + let removedFields = oldFields.Where(f => f.WasRemoved()) + + orderby addedFields.Count() descending + +select new { + t, + nbOldFields = oldFields.Count, + nbNewFields = newFields.Count, + addedFields, + 
removedFields, + + Debt = (10*addedFields.Count()).ToMinutes().ToDebt(), + AnnualInterest = addedFields.Count().Linear( + 1, Severity.High.AnnualInterestThreshold().Value.TotalMinutes, + 100, 4*(Severity.High.AnnualInterestThreshold().Value.TotalMinutes)).ToMinutes().ToAnnualInterest() + +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is defined (*diff mode*). +// +// Types where number of fields is greater than 15 +// might be hard to understand and maintain. +// +// This rule lists types that already had more than 15 fields +// at the baseline time, and for which new fields have been added. +// +// Having many fields for a type might be a symptom +// of too many responsibilities implemented. +// +// Notice that *constants* fields and *static-readonly* fields are not taken account. +// Enumerations types are not taken account also. +//</Description> + +//<HowToFix> +// To refactor such type and increase code quality and maintainability, +// certainly you'll have to group subsets of fields into smaller types +// and dispatch the logic implemented into the methods +// into these smaller types. +// +// The estimated Debt, which means the effort to fix such issue, +// is equal to 10 minutes per field added. +// +// Issues of this rule have a **High** severity, because it is important to focus +// on these issues **now**, before such code gets released in production. 
+//</HowToFix>]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[//<Name>Avoid transforming an immutable type into a mutable one</Name> + +warnif count > 0 +from t in Application.Types where + t.CodeWasChanged() && + t.OlderVersion().IsImmutable && + !t.IsImmutable && + // Don't take account of immutable types transformed into static types (not deemed as immutable) + !t.IsStatic + +let culpritFields = t.InstanceFields.Where(f => f.IsImmutable) +select new { + t, + culpritFields, + Debt = (10 + 10*culpritFields.Count()).ToMinutes().ToDebt(), + Severity = Severity.High +} + +//<Description> +// This rule is executed only if a *baseline for comparison* is defined (*diff mode*). +// +// A type is considered as *immutable* if its instance fields +// cannot be modified once an instance has been built by a constructor. +// +// Being immutable has several fortunate consequences for a type. +// For example its instance objects can be used concurrently +// from several threads without the need to synchronize accesses. +// +// Hence users of such type often rely on the fact that the type is immutable. +// If an immutable type becomes mutable, there are chances that this will break +// users code. +// +// This is why this rule warns about such immutable type that become mutable. +// +// The estimated Debt, which means the effort to fix such issue, +// is equal to 10 minutes plus 10 minutes per instance field that became mutable. +//</Description> + +//<HowToFix> +// If being immutable is an important property for a matched type, +// then the code must be refactored to preserve immutability. +// +// The estimated Debt, which means the effort to fix such issue, +// is equal to 10 minutes plus 10 minutes per instance fields of +// the matched type that is now mutable. 
+// +// Issues of this rule have a **High** severity, because it is important to focus +// on these issues **now**, before such code gets released in production. +//</HowToFix>]]></Query> + </Group> + <Group Name="Object Oriented Design" Active="True" ShownInReport="True"> + <Query Active="False" DisplayList="True" DisplayStat="True" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Avoid interfaces too big</Name> +warnif count > 0 + +from i in JustMyCode.Types +where i.IsInterface && i.NbMethods >= 10 // Optimization First threshold + +// A get;set; property count as one method +let properties = i.Methods.Where(m => m.SimpleName.Length > 4 && (m.IsPropertyGetter || m.IsPropertySetter)) + .Distinct(m => m.SimpleName.Substring(4, m.SimpleName.Length -4)) + +// An event count as one method +let events = i.Methods.Where(m => (m.IsEventAdder|| m.IsEventRemover)) + .Distinct(m => m.SimpleName.Replace("add_","").Replace("remove_","")) + +let methods = i.Methods.Where(m => !m.IsPropertyGetter && !m.IsPropertySetter && !m.IsEventAdder && !m.IsEventRemover) +let methodsCount = methods.Count() + properties.Count() + events.Count() +where methodsCount >= 10 +let publicFactor = i.IsPubliclyVisible ? 1 : 0.5 +orderby methodsCount descending +select new { + i, + Methods= methods, + Properties = properties, + Events = events, + Debt = (publicFactor*methodsCount.Linear(10, 20, 100, 7*60)).ToMinutes().ToDebt(), + // The annual interest varies linearly from interest for severity Medium for an interface with 10 methods + // to interest for severity Critical for an interface with 100 methods and more + AnnualInterest = (publicFactor*methodsCount.Linear( + 10, Severity.Medium.AnnualInterestThreshold().Value.TotalMinutes, + 100, Severity.Critical.AnnualInterestThreshold().Value.TotalMinutes)) + .ToMinutes().ToAnnualInterest() +} + + +//<Description> +// This rule matches interfaces with more than 10 methods. 
+// Interfaces are abstractions and are meant to simplify the code structure. +// An interface should represent a single responsibility. +// Making an interface too large, too complex, necessarily means +// that the interface has too many responsibilities. +// +// A property with getter or setter or both counts as one method. +// An event counts as one method. +//</Description> + +//<HowToFix> +// Typically to fix such issue, the interface must be refactored +// in a group of smaller *single-responsibility* interfaces. +// +// A classic example is a *ISession* large interface, responsible +// for holding states, run commands and offer various accesses +// and facilities. +// +// The classic problem for a large public interface is that it has +// many clients that consume it. As a consequence splitting it in +// smaller interfaces has an important impact and it is not always +// feasible. +// +// The estimated Debt, which means the effort to fix such issue, +// varies linearly from 20 minutes for an interface with 10 methods, +// up to 7 hours for an interface with 100 or more methods. +// The Debt is divided by two if the interface is not publicly +// visible, because in such situation only the current project is impacted +// by the refactoring. +//</HowToFix> +]]></Query> + <Query Active="True" DisplayList="True" DisplayStat="False" DisplaySelectionView="False" IsCriticalRule="False"><![CDATA[// <Name>Base class should not use derivatives</Name> +warnif count > 0 +let excludedTypes = new[] {"TcpDiscoveryIpFinderBase", "EvictionPolicyBase", "PlatformTargetAdapter"} +from baseClass in JustMyCode.Types +where baseClass.IsClass && !excludedTypes.Contains(baseClass.Name) + && baseClass.NbChildren > 0 // <-- for optimization! 
+let derivedClassesUsed = baseClass.DerivedTypes.UsedBy(baseClass) + // Don't warn when a base class is using nested private derived class + .Where(derivedClass => + !(derivedClass.IsNested && + derivedClass.Visibility == Visibility.Private && + derivedClass.ParentType == baseClass + )) +where derivedClassesUsed.Count() > 0 + +let derivedClassesMemberUsed = derivedClassesUsed.SelectMany(c => c.Members).UsedBy(baseClass) +orderby derivedClassesMemberUsed.Count() descending + +select new { + baseClass, + derivedClassesUsed, + derivedClassesMemberUsed, + + Debt = 3*(derivedClassesUsed.Count()+derivedClassesMemberUsed.Count()).ToMinutes().ToDebt(), + Severity = Severity.High +} + +//<Description> +// In *Object-Oriented Programming*, the **open/closed principle** states: +// *software entities (components, classes, methods, etc.) should be open +// for extension, but closed for modification*. +// http://en.wikipedia.org/wiki/Open/closed_principle +// +// Hence a base class should be designed properly to make it easy to derive from, +// this is *extension*. But creating a new derived class, or modifying an +// existing one, shouldn't provoke any *modification* in the base class. +// And if a base class is using some derivative classes somehow, there +// are good chances that such *modification* will be needed. +// +// Extending the base class is not anymore a simple operation, +// this is not good design. +// +// Note that this rule doesn't warn when a base class is using a derived class +// that is nested in t
<TRUNCATED>
