Formatting and whitespace cleanup for the complexity (Big O) support
diff --git a/README.md b/README.md
index c989e57..498c4ca 100644
--- a/README.md
+++ b/README.md
@@ -61,7 +61,9 @@
 BENCHMARK(BM_memcpy)->Range(8, 8<<10);
 ```
 
-By default the arguments in the range are generated in multiples of eight and the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the range multiplier is changed to multiples of two.
+By default the arguments in the range are generated in multiples of eight and
+the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
+range multiplier is changed to multiples of two.
 
 ```c++
 BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
@@ -117,7 +119,9 @@
 ```
 
 ### Calculate asymptotic complexity (Big O)
-Asymptotic complexity might be calculated for a family of benchmarks. The following code will calculate the coefficient for the high-order term in the running time and the normalized root-mean square error of string comparison.
+Asymptotic complexity can be calculated for a family of benchmarks. The
+following code will calculate the coefficient for the high-order term in the
+running time and the normalized root-mean-square error of string comparison.
 
 ```c++
 static void BM_StringCompare(benchmark::State& state) {
@@ -127,14 +131,15 @@
     benchmark::DoNotOptimize(s1.compare(s2));
 }
 BENCHMARK(BM_StringCompare)
-	->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
+    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
 ```
 
-As shown in the following invocation, asymptotic complexity might also be calculated automatically.
+As shown in the following invocation, asymptotic complexity can also be
+calculated automatically.
 
 ```c++
 BENCHMARK(BM_StringCompare)
-	->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oAuto);
+    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oAuto);
 ```
 
 ### Templated benchmarks
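For context, the asymptotic fit needs each run to report its problem size. The test added further down in this change does that with `state.SetComplexityN(state.range_x())`; a user benchmark wired up the same way, letting the framework pick the best fitting curve, might look roughly like this sketch (benchmark name and body are hypothetical, not part of the patch):

```c++
static void BM_VectorPushBack(benchmark::State& state) {
  while (state.KeepRunning()) {
    std::vector<int> v;
    for (int i = 0; i < state.range_x(); ++i)
      v.push_back(i);
  }
  // Report N so the BigO/RMS rows can be computed for this family.
  state.SetComplexityN(state.range_x());
}
BENCHMARK(BM_VectorPushBack)
    ->RangeMultiplier(2)->Range(1<<10, 1<<16)->Complexity(benchmark::oAuto);
```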
diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h
index bf46d97..674b0b5 100644
--- a/include/benchmark/benchmark_api.h
+++ b/include/benchmark/benchmark_api.h
@@ -327,7 +327,7 @@
   // represent the length of N.
   BENCHMARK_ALWAYS_INLINE
   void SetComplexityN(size_t complexity_n) {
-	  complexity_n_ = complexity_n;
+    complexity_n_ = complexity_n;
   }
 
   BENCHMARK_ALWAYS_INLINE
diff --git a/include/benchmark/complexity.h b/include/benchmark/complexity.h
index 82dba82..93b26de 100644
--- a/include/benchmark/complexity.h
+++ b/include/benchmark/complexity.h
@@ -9,14 +9,14 @@
 // complexity for the benchmark. In case oAuto is selected, complexity will be 
 // calculated automatically to the best fit.
 enum BigO {
-	oNone,
-	o1,
-	oN,
-	oNSquared,
-	oNCubed,
-	oLogN,
-	oNLogN,
-	oAuto
+  oNone,
+  o1,
+  oN,
+  oNSquared,
+  oNCubed,
+  oLogN,
+  oNLogN,
+  oAuto
 };
 
 inline std::string GetBigO(BigO complexity) {
@@ -34,9 +34,9 @@
     case o1:
       return "* 1";
     default:
-      return "";      
+      return "";
   }
 }
-   
+
 } // end namespace benchmark
 #endif // COMPLEXITY_H_
diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h
index a912488..4a67f17 100644
--- a/include/benchmark/reporter.h
+++ b/include/benchmark/reporter.h
@@ -67,11 +67,11 @@
 
     // This is set to 0.0 if memory tracing is not enabled.
     double max_heapbytes_used;
-    
+
     // Keep track of arguments to compute asymptotic complexity
     BigO   complexity;
     int complexity_n;
-    
+
     // Inform print function whether the current run is a complexity report
     bool report_big_o;
     bool report_rms;
@@ -90,7 +90,7 @@
   // Note that all the grouped benchmark runs should refer to the same
   // benchmark, thus have the same name.
   virtual void ReportRuns(const std::vector<Run>& report) = 0;
-  
+
   // Called once at the last benchmark in a family of benchmarks, gives information
   // about asymptotic complexity and RMS. 
   // Note that all the benchmark runs in a range should refer to the same benchmark, 
@@ -103,8 +103,9 @@
 
   virtual ~BenchmarkReporter();
 protected:
-  static void ComputeStats(const std::vector<Run> & reports, Run* mean, Run* stddev);
-  static void ComputeBigO(const std::vector<Run> & reports, Run* bigO, Run* rms);
+  static void ComputeStats(const std::vector<Run>& reports,
+                           Run* mean, Run* stddev);
+  static void ComputeBigO(const std::vector<Run>& reports, Run* bigO, Run* rms);
   static TimeUnitMultiplier GetTimeUnitAndMultiplier(TimeUnit unit);
 };
 
diff --git a/src/benchmark.cc b/src/benchmark.cc
index 15274d8..bd9858f 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -702,7 +702,8 @@
 
 void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
                   BenchmarkReporter* br,
-                  std::vector<BenchmarkReporter::Run>& complexity_reports) EXCLUDES(GetBenchmarkLock()) {
+                  std::vector<BenchmarkReporter::Run>& complexity_reports)
+  EXCLUDES(GetBenchmarkLock()) {
   size_t iters = 1;
 
   std::vector<BenchmarkReporter::Run> reports;
@@ -803,10 +804,10 @@
         report.complexity_n = total.complexity_n;
         report.complexity = b.complexity;
         reports.push_back(report);
-        
-        if(report.complexity != oNone) 
+
+        if(report.complexity != oNone)
           complexity_reports.push_back(report);
-     
+
         break;
       }
 
@@ -830,12 +831,12 @@
     }
   }
   br->ReportRuns(reports);
-  
+
   if((b.complexity != oNone) && b.last_benchmark_instance) {
     br->ReportComplexity(complexity_reports);
     complexity_reports.clear();
   }
-  
+
   if (b.multithreaded) {
     for (std::thread& thread : pool)
       thread.join();
diff --git a/src/console_reporter.cc b/src/console_reporter.cc
index cf78a7f..41c00b9 100644
--- a/src/console_reporter.cc
+++ b/src/console_reporter.cc
@@ -84,11 +84,11 @@
     // We don't report asymptotic complexity data if there was a single run.
     return;
   }
-  
+
   Run big_o_data;
   Run rms_data;
   BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data);
-    
+
   // Output using PrintRun.
   PrintRunData(big_o_data);
   PrintRunData(rms_data);
@@ -112,7 +112,8 @@
   const char* timeLabel;
   std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(result.time_unit);
 
-  ColorPrintf((result.report_big_o ||result.report_rms) ? COLOR_BLUE : COLOR_GREEN, "%-*s ",
+  ColorPrintf((result.report_big_o || result.report_rms) ? COLOR_BLUE :
+              COLOR_GREEN, "%-*s ",
               name_field_width_, result.benchmark_name.c_str());
 
   if(result.report_big_o) {
@@ -122,13 +123,11 @@
                 big_o.c_str(),
                 result.cpu_accumulated_time * multiplier,
                 big_o.c_str());
-  }  
-  else if(result.report_rms) {
+  } else if(result.report_rms) {
     ColorPrintf(COLOR_YELLOW, "%10.0f %% %10.0f %% ",
                 result.real_accumulated_time * multiplier * 100,
                 result.cpu_accumulated_time * multiplier * 100);
-  }  
-  else if (result.iterations == 0) {
+  } else if (result.iterations == 0) {
     ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ",
                 result.real_accumulated_time * multiplier,
                 timeLabel,
@@ -144,8 +143,9 @@
                 timeLabel);
   }
 
-  if(!result.report_big_o && !result.report_rms)
+  if(!result.report_big_o && !result.report_rms) {
     ColorPrintf(COLOR_CYAN, "%10lld", result.iterations);
+  }
 
   if (!rate.empty()) {
     ColorPrintf(COLOR_DEFAULT, " %*s", 13, rate.c_str());
diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc
index 9bfd66b..9ac74b4 100644
--- a/src/csv_reporter.cc
+++ b/src/csv_reporter.cc
@@ -66,16 +66,16 @@
   }
 }
 
-void CSVReporter::ReportComplexity(const std::vector<Run> & complexity_reports) {
+void CSVReporter::ReportComplexity(const std::vector<Run>& complexity_reports) {
   if (complexity_reports.size() < 2) {
     // We don't report asymptotic complexity data if there was a single run.
     return;
   }
-  
+
   Run big_o_data;
   Run rms_data;
   BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data);
-  
+
   // Output using PrintRun.
   PrintRunData(big_o_data);
   PrintRunData(rms_data);
@@ -100,19 +100,19 @@
   std::cout << "\"" << name << "\",";
 
   // Do not print iteration on bigO and RMS report
-  if(!run.report_big_o && !run.report_rms)
-    std::cout << run.iterations << ",";
-  else
-    std::cout << ",";
-    
+  if(!run.report_big_o && !run.report_rms) {
+    std::cout << run.iterations;
+  }
+  std::cout << ",";
+
   std::cout << real_time << ",";
   std::cout << cpu_time << ",";
-  
+
   // Do not print timeLabel on RMS report
-  if(!run.report_rms)
-    std::cout << timeLabel << ",";
-  else
-    std::cout << ",";
+  if(!run.report_rms) {
+    std::cout << timeLabel;
+  }
+  std::cout << ",";
 
   if (run.bytes_per_second > 0.0) {
     std::cout << run.bytes_per_second;
diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index c9d9cf1..743a223 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -120,17 +120,17 @@
     // We don't report asymptotic complexity data if there was a single run.
     return;
   }
-  
+
   std::string indent(4, ' ');
   std::ostream& out = std::cout;
   if (!first_report_) {
     out << ",\n";
   }
-  
+
   Run big_o_data;
   Run rms_data;
   BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data);
-  
+
   // Output using PrintRun.
   out << indent << "{\n";
   PrintRunData(big_o_data);
diff --git a/src/minimal_leastsq.cc b/src/minimal_leastsq.cc
index ea6bd46..2a73887 100644
--- a/src/minimal_leastsq.cc
+++ b/src/minimal_leastsq.cc
@@ -34,17 +34,21 @@
       return n * log2(n);
     case benchmark::o1:
     default:
-      return 1;   
+      return 1;
   }
 }
 
-// Internal function to find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error.
+// Internal function to find the coefficient for the high-order term in the
+// running time, by minimizing the sum of squares of relative error.
 //   - n          : Vector containing the size of the benchmark tests.
 //   - time       : Vector containing the times for the benchmark tests.
 //   - complexity : Fitting curve.
-// For a deeper explanation on the algorithm logic, look the README file at http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
+// For a deeper explanation of the algorithm logic, see the README file at
+// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
 
-LeastSq CalculateLeastSq(const std::vector<int>& n, const std::vector<double>& time, const benchmark::BigO complexity) {
+LeastSq CalculateLeastSq(const std::vector<int>& n,
+                         const std::vector<double>& time,
+                         const benchmark::BigO complexity) {
   CHECK_NE(complexity, benchmark::oAuto);
 
   double sigma_gn = 0;
@@ -64,12 +68,13 @@
   LeastSq result;
   result.complexity = complexity;
 
-  // Calculate complexity. 
+  // Calculate complexity.
   // o1 is treated as an special case
-  if (complexity != benchmark::o1)
+  if (complexity != benchmark::o1) {
     result.coef = sigma_time_gn / sigma_gn_squared;
-  else
+  } else {
     result.coef = sigma_time / n.size();
+  }
 
   // Calculate RMS
   double rms = 0;
@@ -80,36 +85,44 @@
 
   double mean = sigma_time / n.size();
 
-  result.rms = sqrt(rms / n.size()) / mean; // Normalized RMS by the mean of the observed values
+  // RMS normalized by the mean of the observed values
+  result.rms = sqrt(rms / n.size()) / mean;
 
   return result;
 }
 
-// Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error.
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error.
 //   - n          : Vector containing the size of the benchmark tests.
 //   - time       : Vector containing the times for the benchmark tests.
-//   - complexity : If different than oAuto, the fitting curve will stick to this one. If it is oAuto, it will be calculated 
-//                  the best fitting curve.
-
-LeastSq MinimalLeastSq(const std::vector<int>& n, const std::vector<double>& time, const benchmark::BigO complexity) {
+//   - complexity : If different from oAuto, the fitting curve will stick to
+//                  this one. If it is oAuto, the best fitting curve will be
+//                  calculated.
+LeastSq MinimalLeastSq(const std::vector<int>& n,
+                       const std::vector<double>& time,
+                       const benchmark::BigO complexity) {
   CHECK_EQ(n.size(), time.size());
   CHECK_GE(n.size(), 2);  // Do not compute fitting curve is less than two benchmark runs are given
   CHECK_NE(complexity, benchmark::oNone);
 
   if(complexity == benchmark::oAuto) {
-    std::vector<benchmark::BigO> fit_curves = { benchmark::oLogN, benchmark::oN, benchmark::oNLogN, benchmark::oNSquared, benchmark::oNCubed };
+    std::vector<benchmark::BigO> fit_curves = {
+      benchmark::oLogN, benchmark::oN, benchmark::oNLogN, benchmark::oNSquared,
+      benchmark::oNCubed };
 
-    LeastSq best_fit = CalculateLeastSq(n, time, benchmark::o1); // Take o1 as default best fitting curve
+    // Take o1 as default best fitting curve
+    LeastSq best_fit = CalculateLeastSq(n, time, benchmark::o1);
 
     // Compute all possible fitting curves and stick to the best one
     for (const auto& fit : fit_curves) {
       LeastSq current_fit = CalculateLeastSq(n, time, fit);
-      if (current_fit.rms < best_fit.rms)
+      if (current_fit.rms < best_fit.rms) {
         best_fit = current_fit;
+      }
     }
 
     return best_fit;
   }
-  else
-    return CalculateLeastSq(n, time, complexity);
-}
\ No newline at end of file
+
+  return CalculateLeastSq(n, time, complexity);
+}
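For reference, the accumulators in CalculateLeastSq above compute the standard one-parameter least-squares fit of t ≈ c·g(n) through the origin against the chosen fitting curve g(n) (my notation, not the patch's); the coefficient and the normalized error it reports then come out as

$$\hat c = \frac{\sum_i t_i \, g(n_i)}{\sum_i g(n_i)^2},
\qquad
\mathrm{rms} = \frac{1}{\bar t}\,\sqrt{\frac{1}{N}\sum_i \bigl(t_i - \hat c\, g(n_i)\bigr)^2},$$

with t_i the measured times, t̄ their mean, and N the number of runs; for the o1 case the code simply uses the mean time as the coefficient.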
diff --git a/src/minimal_leastsq.h b/src/minimal_leastsq.h
index 6dcb894..0dc12b7 100644
--- a/src/minimal_leastsq.h
+++ b/src/minimal_leastsq.h
@@ -23,11 +23,13 @@
 #include <vector>
 
 // This data structure will contain the result returned by MinimalLeastSq
-//   - coef        : Estimated coeficient for the high-order term as interpolated from data.
+//   - coef        : Estimated coefficient for the high-order term as
+//                   interpolated from data.
 //   - rms         : Normalized Root Mean Squared Error.
-//   - complexity  : Scalability form (e.g. oN, oNLogN). In case a scalability form has been provided to MinimalLeastSq
-//                   this will return the same value. In case BigO::oAuto has been selected, this parameter will return the 
-//                   best fitting curve detected.
+//   - complexity  : Scalability form (e.g. oN, oNLogN). If a scalability form
+//                   has been provided to MinimalLeastSq, this will return the
+//                   same value. If BigO::oAuto has been selected, this
+//                   parameter will return the best fitting curve detected.
 
 struct LeastSq {
   LeastSq() :
@@ -37,10 +39,13 @@
 
   double coef;
   double rms;
-  benchmark::BigO   complexity;
+  benchmark::BigO complexity;
 };
 
-// Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error.
-LeastSq MinimalLeastSq(const std::vector<int>& n, const std::vector<double>& time, const benchmark::BigO complexity = benchmark::oAuto);
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error.
+LeastSq MinimalLeastSq(const std::vector<int>& n,
+                       const std::vector<double>& time,
+                       const benchmark::BigO complexity = benchmark::oAuto);
 
 #endif
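Taken on its own, the header above gives MinimalLeastSq a small, self-contained contract: matching vectors of problem sizes and timings in; fitted coefficient, normalized RMS, and detected (or requested) complexity out. A hypothetical standalone call, with made-up timings, might look like this:

```c++
#include <vector>
#include "minimal_leastsq.h"  // declares LeastSq and MinimalLeastSq

int main() {
  // Problem sizes and matching per-iteration times (numbers invented
  // purely for illustration).
  std::vector<int> n = {1 << 10, 1 << 11, 1 << 12, 1 << 13};
  std::vector<double> cpu_time = {1.1, 2.0, 4.2, 7.9};

  // The third argument defaults to benchmark::oAuto, so the best fitting
  // curve is chosen by comparing the RMS of each candidate curve.
  LeastSq fit = MinimalLeastSq(n, cpu_time);

  // fit.complexity : detected BigO form
  // fit.coef       : coefficient of the high-order term
  // fit.rms        : normalized root-mean-square error of the fit
  return 0;
}
```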
diff --git a/src/reporter.cc b/src/reporter.cc
index 544df87..2830fa1 100644
--- a/src/reporter.cc
+++ b/src/reporter.cc
@@ -82,7 +82,9 @@
 void BenchmarkReporter::ComputeBigO(
     const std::vector<Run>& reports,
     Run* big_o, Run* rms) {
-  CHECK(reports.size() >= 2) << "Cannot compute asymptotic complexity for less than 2 reports";
+  CHECK(reports.size() >= 2)
+      << "Cannot compute asymptotic complexity for fewer than 2 reports";
+
   // Accumulators.
   std::vector<int> n;
   std::vector<double> real_time;
@@ -90,21 +92,21 @@
 
   // Populate the accumulators.
   for (const Run& run : reports) {
-    n.push_back(run.complexity_n); 
+    n.push_back(run.complexity_n);
     real_time.push_back(run.real_accumulated_time/run.iterations);
     cpu_time.push_back(run.cpu_accumulated_time/run.iterations);
   }
-  
+
   LeastSq result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
-  
+
   // result_cpu.complexity is passed as parameter to result_real because in case
-  // reports[0].complexity is oAuto, the noise on the measured data could make 
-  // the best fit function of Cpu and Real differ. In order to solve this, we take
-  // the best fitting function for the Cpu, and apply it to Real data.
+  // reports[0].complexity is oAuto, the noise in the measured data could make
+  // the best-fit functions for CPU and real time differ. To solve this, we
+  // take the best fit for the CPU time and apply it to the real time data.
   LeastSq result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
 
   std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
-  
+
   // Get the data from the accumulator to BenchmarkReporter::Run's.
   big_o->benchmark_name = benchmark_name + "_BigO";
   big_o->iterations = 0;
@@ -115,7 +117,8 @@
 
   double multiplier;
   const char* time_label;
-  std::tie(time_label, multiplier) = GetTimeUnitAndMultiplier(reports[0].time_unit);
+  std::tie(time_label, multiplier) =
+      GetTimeUnitAndMultiplier(reports[0].time_unit);
 
   // Only add label to mean/stddev if it is same for all runs
   big_o->report_label = reports[0].report_label;
diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index e454ee4..b8cd440 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -40,7 +40,7 @@
 }
 BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN);
 BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oAuto);
-   
+
 static void BM_Complexity_O_N_Squared(benchmark::State& state) {
   std::string s1(state.range_x(), '-');
   std::string s2(state.range_x(), '-');
@@ -54,7 +54,7 @@
     }
 }
 BENCHMARK(BM_Complexity_O_N_Squared) -> Range(1, 1<<8) -> Complexity(benchmark::oNSquared);
-    
+
 static void BM_Complexity_O_N_Cubed(benchmark::State& state) {
   std::string s1(state.range_x(), '-');
   std::string s2(state.range_x(), '-');
@@ -81,7 +81,7 @@
   }
   state.SetComplexityN(state.range_x());
 }
-BENCHMARK(BM_Complexity_O_log_N) 
+BENCHMARK(BM_Complexity_O_log_N)
     -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oLogN);
 
 static void BM_Complexity_O_N_log_N(benchmark::State& state) {
@@ -102,4 +102,4 @@
 BENCHMARK(BM_Extreme_Cases) -> Complexity(benchmark::oNLogN);
 BENCHMARK(BM_Extreme_Cases) -> Arg(42) -> Complexity(benchmark::oAuto);
 
-BENCHMARK_MAIN()
\ No newline at end of file
+BENCHMARK_MAIN()