Add divergence analysis to linter (#4465)

Currently, the analysis handles promotion of divergence due to reconvergence rules, but doesn't handle "late merges" caused by a later-than-necessary declared merge block.

Co-authored-by: Jakub Kuderski <kubak@google.com>
diff --git a/BUILD.bazel b/BUILD.bazel
index 4af0ce5..68e612a 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -236,7 +236,7 @@
 
 cc_library(
     name = "spirv_tools_lint",
-    srcs = glob(["source/lint/*.cpp"]),
+    srcs = glob(["source/lint/*.cpp", "source/lint/*.h"]),
     hdrs = ["include/spirv-tools/linter.hpp"],
     copts = COMMON_COPTS,
     linkstatic = 1,
diff --git a/source/lint/CMakeLists.txt b/source/lint/CMakeLists.txt
index d996e3c..f9cae28 100644
--- a/source/lint/CMakeLists.txt
+++ b/source/lint/CMakeLists.txt
@@ -12,7 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 set(SPIRV_TOOLS_LINT_SOURCES
+  divergence_analysis.h
+
   linter.cpp
+  divergence_analysis.cpp
 )
 
 if(MSVC AND (NOT ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")))
diff --git a/source/lint/divergence_analysis.cpp b/source/lint/divergence_analysis.cpp
new file mode 100644
index 0000000..b5a72b4
--- /dev/null
+++ b/source/lint/divergence_analysis.cpp
@@ -0,0 +1,245 @@
+// Copyright (c) 2021 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "source/lint/divergence_analysis.h"
+
+#include <cassert>
+
+#include "source/opt/basic_block.h"
+#include "source/opt/control_dependence.h"
+#include "source/opt/dataflow.h"
+#include "source/opt/function.h"
+#include "source/opt/instruction.h"
+#include "spirv/unified1/spirv.h"
+
+namespace spvtools {
+namespace lint {
+
+void DivergenceAnalysis::EnqueueSuccessors(opt::Instruction* inst) {
+  // Enqueue control dependents of block, if applicable.
+  // There are two ways for a dependence source to be updated:
+  // 1. control -> control: source block is marked divergent.
+  // 2. data -> control: branch condition is marked divergent.
+  uint32_t block_id;
+  if (inst->IsBlockTerminator()) {
+    // A terminator's divergence flows to the blocks that are
+    // control-dependent on its containing block.
+    block_id = context().get_instr_block(inst)->id();
+  } else if (inst->opcode() == SpvOpLabel) {
+    block_id = inst->result_id();
+    opt::BasicBlock* bb = context().cfg()->block(block_id);
+    // Only enqueue phi instructions, as other uses don't affect divergence.
+    bb->ForEachPhiInst([this](opt::Instruction* phi) { Enqueue(phi); });
+  } else {
+    // Ordinary value-producing instruction: plain data -> data flow; defer
+    // to the default def-use-based enqueueing and skip the control handling.
+    opt::ForwardDataFlowAnalysis::EnqueueUsers(inst);
+    return;
+  }
+  if (!cd_.HasBlock(block_id)) {
+    return;
+  }
+  // Re-examine every block control-dependent on |block_id|.
+  for (const spvtools::opt::ControlDependence& dep :
+       cd_.GetDependenceTargets(block_id)) {
+    opt::Instruction* target_inst =
+        context().cfg()->block(dep.target_bb_id())->GetLabelInst();
+    Enqueue(target_inst);
+  }
+}
+
+opt::DataFlowAnalysis::VisitResult DivergenceAnalysis::Visit(
+    opt::Instruction* inst) {
+  // Labels stand in for their block's control flow; everything else is
+  // treated as a value definition.
+  if (inst->opcode() == SpvOpLabel) {
+    return VisitBlock(inst->result_id());
+  } else {
+    return VisitInstruction(inst);
+  }
+}
+
+opt::DataFlowAnalysis::VisitResult DivergenceAnalysis::VisitBlock(uint32_t id) {
+  // Recomputes the divergence level of block |id| by joining over all of its
+  // control dependences. Levels only move up the lattice, so the fixpoint
+  // iteration terminates.
+  if (!cd_.HasBlock(id)) {
+    // Block is not part of the control dependence graph; nothing to do.
+    return opt::DataFlowAnalysis::VisitResult::kResultFixed;
+  }
+  DivergenceLevel& cur_level = divergence_[id];
+  if (cur_level == DivergenceLevel::kDivergent) {
+    // Already at the top of the lattice; cannot change further.
+    return opt::DataFlowAnalysis::VisitResult::kResultFixed;
+  }
+  DivergenceLevel orig = cur_level;
+  for (const spvtools::opt::ControlDependence& dep :
+       cd_.GetDependenceSources(id)) {
+    if (divergence_[dep.source_bb_id()] > cur_level) {
+      // control -> control: inherit the divergence of the source block.
+      cur_level = divergence_[dep.source_bb_id()];
+      divergence_source_[id] = dep.source_bb_id();
+    } else if (dep.source_bb_id() != 0) {
+      // data -> control: take the divergence of the branch condition in the
+      // source block. (Source id 0 appears to denote the dependence on
+      // function entry, which has no branch condition — TODO confirm.)
+      uint32_t condition_id = dep.GetConditionID(*context().cfg());
+      DivergenceLevel dep_level = divergence_[condition_id];
+      // Check if we are along the chain of unconditional branches starting from
+      // the branch target.
+      if (follow_unconditional_branches_[dep.branch_target_bb_id()] !=
+          follow_unconditional_branches_[dep.target_bb_id()]) {
+        // We must have reconverged in order to reach this block.
+        // Promote partially uniform to divergent.
+        if (dep_level == DivergenceLevel::kPartiallyUniform) {
+          dep_level = DivergenceLevel::kDivergent;
+        }
+      }
+      if (dep_level > cur_level) {
+        cur_level = dep_level;
+        divergence_source_[id] = condition_id;
+        divergence_dependence_source_[id] = dep.source_bb_id();
+      }
+    }
+  }
+  return cur_level > orig ? VisitResult::kResultChanged
+                          : VisitResult::kResultFixed;
+}
+
+opt::DataFlowAnalysis::VisitResult DivergenceAnalysis::VisitInstruction(
+    opt::Instruction* inst) {
+  // Recomputes the divergence level of a (non-label) instruction's result.
+  if (inst->IsBlockTerminator()) {
+    // This is called only when the condition has changed, so return changed.
+    return VisitResult::kResultChanged;
+  }
+  if (!inst->HasResultId()) {
+    // No value produced; nothing to track.
+    return VisitResult::kResultFixed;
+  }
+  uint32_t id = inst->result_id();
+  DivergenceLevel& cur_level = divergence_[id];
+  if (cur_level == DivergenceLevel::kDivergent) {
+    // Already at the top of the lattice; cannot change further.
+    return opt::DataFlowAnalysis::VisitResult::kResultFixed;
+  }
+  DivergenceLevel orig = cur_level;
+  cur_level = ComputeInstructionDivergence(inst);
+  return cur_level > orig ? VisitResult::kResultChanged
+                          : VisitResult::kResultFixed;
+}
+
+DivergenceAnalysis::DivergenceLevel
+DivergenceAnalysis::ComputeInstructionDivergence(opt::Instruction* inst) {
+  // TODO(kuhar): Check to see if inst is decorated with Uniform or UniformId
+  // and use that to short circuit other checks. Uniform is for subgroups which
+  // would satisfy derivative groups too. UniformId takes a scope, so if it is
+  // subgroup or greater it could satisfy derivative group and
+  // Device/QueueFamily could satisfy fully uniform.
+  uint32_t id = inst->result_id();
+  // Handle divergence roots.
+  if (inst->opcode() == SpvOpFunctionParameter) {
+    // Function parameters are treated as divergent; source 0 marks a root.
+    divergence_source_[id] = 0;
+    return divergence_[id] = DivergenceLevel::kDivergent;
+  } else if (inst->IsLoad()) {
+    spvtools::opt::Instruction* var = inst->GetBaseAddress();
+    if (var->opcode() != SpvOpVariable) {
+      // Assume divergent.
+      // (divergence_[id] is written by the caller through its reference.)
+      divergence_source_[id] = 0;
+      return DivergenceLevel::kDivergent;
+    }
+    DivergenceLevel ret = ComputeVariableDivergence(var);
+    if (ret > DivergenceLevel::kUniform) {
+      // Any non-uniform load is a divergence root (source 0).
+      divergence_source_[inst->result_id()] = 0;
+    }
+    return divergence_[id] = ret;
+  }
+  // Get the maximum divergence of the operands.
+  DivergenceLevel ret = DivergenceLevel::kUniform;
+  inst->ForEachInId([this, inst, &ret](const uint32_t* op) {
+    if (!op) return;
+    if (divergence_[*op] > ret) {
+      // Record the operand that raised the level as the divergence source.
+      divergence_source_[inst->result_id()] = *op;
+      ret = divergence_[*op];
+    }
+  });
+  divergence_[inst->result_id()] = ret;
+  return ret;
+}
+
+DivergenceAnalysis::DivergenceLevel
+DivergenceAnalysis::ComputeVariableDivergence(opt::Instruction* var) {
+  // Classifies a variable by its storage class: writable/per-invocation
+  // classes are divergent, read-only shared classes are uniform.
+  uint32_t type_id = var->type_id();
+  spvtools::opt::analysis::Pointer* type =
+      context().get_type_mgr()->GetType(type_id)->AsPointer();
+  assert(type != nullptr);
+  uint32_t def_id = var->result_id();
+  DivergenceLevel ret;
+  switch (type->storage_class()) {
+    case SpvStorageClassFunction:
+    case SpvStorageClassGeneric:
+    case SpvStorageClassAtomicCounter:
+    case SpvStorageClassStorageBuffer:
+    case SpvStorageClassPhysicalStorageBuffer:
+    case SpvStorageClassOutput:
+    case SpvStorageClassWorkgroup:
+    case SpvStorageClassImage:  // Image atomics probably aren't uniform.
+    case SpvStorageClassPrivate:
+      ret = DivergenceLevel::kDivergent;
+      break;
+    case SpvStorageClassInput:
+      ret = DivergenceLevel::kDivergent;
+      // If this variable has a Flat decoration, it is partially uniform.
+      // TODO(kuhar): Track access chain indices and also consider Flat members
+      // of a structure.
+      context().get_decoration_mgr()->WhileEachDecoration(
+          def_id, SpvDecorationFlat, [&ret](const opt::Instruction&) {
+            // Returning false stops iterating after the first Flat decoration.
+            ret = DivergenceLevel::kPartiallyUniform;
+            return false;
+          });
+      break;
+    case SpvStorageClassUniformConstant:
+      // May be a storage image which is also written to; mark those as
+      // divergent.
+      if (!var->IsVulkanStorageImage() || var->IsReadOnlyPointer()) {
+        ret = DivergenceLevel::kUniform;
+      } else {
+        ret = DivergenceLevel::kDivergent;
+      }
+      break;
+    case SpvStorageClassUniform:
+    case SpvStorageClassPushConstant:
+    case SpvStorageClassCrossWorkgroup:  // Not for shaders; default uniform.
+    default:
+      ret = DivergenceLevel::kUniform;
+      break;
+  }
+  return ret;
+}
+
+void DivergenceAnalysis::Setup(opt::Function* function) {
+  // TODO(kuhar): Run functions called by |function| so we can detect
+  // reconvergence caused by multiple returns.
+  cd_.ComputeControlDependenceGraph(
+      *context().cfg(), *context().GetPostDominatorAnalysis(function));
+  // Precompute, for every block, the final block reached by following only
+  // unconditional OpBranch edges; VisitBlock uses this to detect
+  // reconvergence.
+  context().cfg()->ForEachBlockInPostOrder(
+      function->entry().get(), [this](const opt::BasicBlock* bb) {
+        uint32_t id = bb->id();
+        if (bb->terminator() == nullptr ||
+            bb->terminator()->opcode() != SpvOpBranch) {
+          // Not an unconditional branch: the chain ends here.
+          follow_unconditional_branches_[id] = id;
+        } else {
+          uint32_t target_id = bb->terminator()->GetSingleWordInOperand(0);
+          // Target is guaranteed to have been visited before us in postorder.
+          follow_unconditional_branches_[id] =
+              follow_unconditional_branches_[target_id];
+        }
+      });
+}
+
+// Prints a human-readable name for a divergence level (used in diagnostics).
+std::ostream& operator<<(std::ostream& os,
+                         DivergenceAnalysis::DivergenceLevel level) {
+  switch (level) {
+    case DivergenceAnalysis::DivergenceLevel::kUniform:
+      return os << "uniform";
+    case DivergenceAnalysis::DivergenceLevel::kPartiallyUniform:
+      return os << "partially uniform";
+    case DivergenceAnalysis::DivergenceLevel::kDivergent:
+      return os << "divergent";
+    default:
+      return os << "<invalid divergence level>";
+  }
+}
+
+}  // namespace lint
+}  // namespace spvtools
diff --git a/source/lint/divergence_analysis.h b/source/lint/divergence_analysis.h
new file mode 100644
index 0000000..4d595ec
--- /dev/null
+++ b/source/lint/divergence_analysis.h
@@ -0,0 +1,163 @@
+// Copyright (c) 2021 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SOURCE_LINT_DIVERGENCE_ANALYSIS_H_
+#define SOURCE_LINT_DIVERGENCE_ANALYSIS_H_
+
+#include <cstdint>
+#include <ostream>
+#include <unordered_map>
+
+#include "source/opt/basic_block.h"
+#include "source/opt/control_dependence.h"
+#include "source/opt/dataflow.h"
+#include "source/opt/function.h"
+#include "source/opt/instruction.h"
+
+namespace spvtools {
+namespace lint {
+
+// Computes the static divergence level for blocks (control flow) and values.
+//
+// A value is uniform if all threads that execute it are guaranteed to have the
+// same value. Similarly, a value is partially uniform if this is true only
+// within each derivative group. If neither apply, it is divergent.
+//
+// Control flow through a block is uniform if for any possible execution and
+// point in time, all threads are executing it, or no threads are executing it.
+// In particular, it is never possible for some threads to be inside the block
+// and some threads not executing.
+// TODO(kuhar): Clarify the difference between uniform, divergent, and
+// partially-uniform execution in this analysis.
+//
+// Caveat:
+// As we use control dependence to determine how divergence is propagated, this
+// analysis can be overly permissive when the merge block for a conditional
+// branch or switch is later than (strictly postdominates) the expected merge
+// block, which is the immediate postdominator. However, this is not expected to
+// be a problem in practice, given that SPIR-V is generally output by compilers
+// and other automated tools, which would assign the earliest possible merge
+// block, rather than written by hand.
+// TODO(kuhar): Handle late merges.
+class DivergenceAnalysis : public opt::ForwardDataFlowAnalysis {
+ public:
+  // The tightest (most uniform) level of divergence that can be determined
+  // statically for a value or control flow for a block.
+  //
+  // The values are ordered such that A > B means that A is potentially more
+  // divergent than B.
+  // TODO(kuhar): Rename |PartiallyUniform' to something less confusing. For
+  // example, the enum could be based on scopes.
+  enum class DivergenceLevel {
+    // The value or control flow is uniform across the entire invocation group.
+    kUniform = 0,
+    // The value or control flow is uniform across the derivative group, but not
+    // the invocation group.
+    kPartiallyUniform = 1,
+    // The value or control flow is not statically uniform.
+    kDivergent = 2,
+  };
+
+  // NOTE(review): single-argument constructor is implicit; consider marking
+  // it explicit.
+  DivergenceAnalysis(opt::IRContext& context)
+      : ForwardDataFlowAnalysis(context, LabelPosition::kLabelsAtEnd) {}
+
+  // Returns the divergence level for the given value (non-label instructions),
+  // or control flow for the given block.
+  DivergenceLevel GetDivergenceLevel(uint32_t id) {
+    auto it = divergence_.find(id);
+    if (it == divergence_.end()) {
+      // Ids never touched by the analysis default to uniform.
+      return DivergenceLevel::kUniform;
+    }
+    return it->second;
+  }
+
+  // Returns the divergence source for the given id. The following types of
+  // divergence flows from A to B are possible:
+  //
+  // data -> data: A is used as an operand in the definition of B.
+  // data -> control: B is control-dependent on a branch with condition A.
+  // control -> data: B is a OpPhi instruction in which A is a block operand.
+  // control -> control: B is control-dependent on A.
+  uint32_t GetDivergenceSource(uint32_t id) {
+    auto it = divergence_source_.find(id);
+    if (it == divergence_source_.end()) {
+      // 0 means either no source recorded or a divergence root.
+      return 0;
+    }
+    return it->second;
+  }
+
+  // Returns the dependence source for the control dependence for the given id.
+  // This only exists for data -> control edges.
+  //
+  // In other words, if block 2 is dependent on block 1 due to value 3 (e.g.
+  // block 1 terminates with OpBranchConditional %3 %2 %4):
+  // * GetDivergenceSource(2) = 3
+  // * GetDivergenceDependenceSource(2) = 1
+  //
+  // Returns 0 if not applicable.
+  uint32_t GetDivergenceDependenceSource(uint32_t id) {
+    auto it = divergence_dependence_source_.find(id);
+    if (it == divergence_dependence_source_.end()) {
+      return 0;
+    }
+    return it->second;
+  }
+
+  void InitializeWorklist(opt::Function* function,
+                          bool is_first_iteration) override {
+    // Since |EnqueueSuccessors| is complete, we only need one pass.
+    if (is_first_iteration) {
+      Setup(function);
+      opt::ForwardDataFlowAnalysis::InitializeWorklist(function, true);
+    }
+  }
+
+  void EnqueueSuccessors(opt::Instruction* inst) override;
+
+  VisitResult Visit(opt::Instruction* inst) override;
+
+ private:
+  VisitResult VisitBlock(uint32_t id);
+  VisitResult VisitInstruction(opt::Instruction* inst);
+
+  // Computes the divergence level for the result of the given instruction
+  // based on the current state of the analysis. This is always an
+  // underapproximation, which will be improved as the analysis proceeds.
+  DivergenceLevel ComputeInstructionDivergence(opt::Instruction* inst);
+
+  // Computes the divergence level for a variable, which is used for loads.
+  DivergenceLevel ComputeVariableDivergence(opt::Instruction* var);
+
+  // Initializes data structures for performing dataflow on the given function.
+  void Setup(opt::Function* function);
+
+  // Tightest known divergence level per value/block id; absent => uniform.
+  std::unordered_map<uint32_t, DivergenceLevel> divergence_;
+  // Id of the value or block that caused each id's divergence (0 = root).
+  std::unordered_map<uint32_t, uint32_t> divergence_source_;
+  // For data -> control edges only: the source block of the dependence.
+  std::unordered_map<uint32_t, uint32_t> divergence_dependence_source_;
+
+  // Stores the result of following unconditional branches starting from the
+  // given block. This is used to detect when reconvergence needs to be
+  // accounted for.
+  std::unordered_map<uint32_t, uint32_t> follow_unconditional_branches_;
+
+  // Control dependence graph of the function being analyzed.
+  opt::ControlDependenceAnalysis cd_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+                         DivergenceAnalysis::DivergenceLevel level);
+
+}  // namespace lint
+}  // namespace spvtools
+
+#endif  // SOURCE_LINT_DIVERGENCE_ANALYSIS_H_
diff --git a/test/lint/CMakeLists.txt b/test/lint/CMakeLists.txt
index b9ef208..09bc6d3 100644
--- a/test/lint/CMakeLists.txt
+++ b/test/lint/CMakeLists.txt
@@ -13,6 +13,6 @@
 # limitations under the License.
 
 add_spvtools_unittest(TARGET lint
-  SRCS placeholder_test.cpp
+  SRCS divergence_analysis_test.cpp
   LIBS SPIRV-Tools-lint SPIRV-Tools-opt
 )
diff --git a/test/lint/divergence_analysis_test.cpp b/test/lint/divergence_analysis_test.cpp
new file mode 100644
index 0000000..36cd32d
--- /dev/null
+++ b/test/lint/divergence_analysis_test.cpp
@@ -0,0 +1,700 @@
+// Copyright (c) 2021 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "source/lint/divergence_analysis.h"
+
+#include <iostream>
+#include <memory>
+#include <string>
+
+#include "gtest/gtest.h"
+#include "source/opt/build_module.h"
+#include "source/opt/ir_context.h"
+#include "source/opt/module.h"
+#include "spirv-tools/libspirv.h"
+
+namespace spvtools {
+namespace lint {
+namespace {
+
+// Message consumer for BuildModule: routes assembler diagnostics to
+// stderr (errors) or stdout (warnings/infos) so test failures are visible.
+void CLIMessageConsumer(spv_message_level_t level, const char*,
+                        const spv_position_t& position, const char* message) {
+  switch (level) {
+    case SPV_MSG_FATAL:
+    case SPV_MSG_INTERNAL_ERROR:
+    case SPV_MSG_ERROR:
+      std::cerr << "error: line " << position.index << ": " << message
+                << std::endl;
+      break;
+    case SPV_MSG_WARNING:
+      std::cout << "warning: line " << position.index << ": " << message
+                << std::endl;
+      break;
+    case SPV_MSG_INFO:
+      std::cout << "info: line " << position.index << ": " << message
+                << std::endl;
+      break;
+    default:
+      break;
+  }
+}
+
+// Fixture that assembles a module and runs the divergence analysis on its
+// first function.
+class DivergenceTest : public ::testing::Test {
+ protected:
+  // IR context of the most recently built module.
+  std::unique_ptr<opt::IRContext> context_;
+  // Analysis results for the first function of that module.
+  std::unique_ptr<DivergenceAnalysis> divergence_;
+
+  // Assembles |text|, verifies the first function's id is |function_id|,
+  // and runs the analysis on it. Asserts fatally on assembly failure.
+  void Build(std::string text, uint32_t function_id = 1) {
+    context_ = BuildModule(SPV_ENV_UNIVERSAL_1_0, CLIMessageConsumer, text,
+                           SPV_TEXT_TO_BINARY_OPTION_PRESERVE_NUMERIC_IDS);
+    ASSERT_NE(nullptr, context_.get());
+    opt::Module* module = context_->module();
+    ASSERT_NE(nullptr, module);
+    // First function should have the given ID.
+    ASSERT_NE(module->begin(), module->end());
+    opt::Function* function = &*module->begin();
+    ASSERT_EQ(function->result_id(), function_id);
+    divergence_.reset(new DivergenceAnalysis(*context_));
+    divergence_->Run(function);
+  }
+};
+
+// Makes assertions a bit shorter.
+using Level = DivergenceAnalysis::DivergenceLevel;
+
+namespace {
+// Returns the shared module header: a fragment shader with two Input
+// variables, %x (no decoration, loads are divergent) and %y (decorated Flat,
+// loads are partially uniform), plus common types/constants, ending just
+// after the OpFunction for %1 so tests append only the function body.
+std::string Preamble() {
+  return R"(
+               OpCapability Shader
+               OpMemoryModel Logical GLSL450
+               OpEntryPoint Fragment %1 "main" %x %y
+	       OpExecutionMode %1 OriginLowerLeft
+               OpDecorate %y Flat
+       %void = OpTypeVoid
+     %void_f = OpTypeFunction %void
+       %bool = OpTypeBool
+      %float = OpTypeFloat 32
+      %false = OpConstantFalse %bool
+       %true = OpConstantTrue %bool
+       %zero = OpConstant %float 0
+        %one = OpConstant %float 1
+        %x_t = OpTypePointer Input %float
+          %x = OpVariable %x_t Input
+          %y = OpVariable %x_t Input
+          %1 = OpFunction %void None %void_f
+  )";
+}
+}  // namespace
+
+// Checks basic propagation: a divergent load makes its comparison divergent
+// (data -> data), which makes the taken branch's block divergent
+// (data -> control), while the merge block stays uniform.
+TEST_F(DivergenceTest, SimpleTest) {
+  // pseudocode:
+  //     %10:
+  //     %11 = load x
+  //     if (%12 = (%11 < 0)) {
+  //       %13:
+  //       // do nothing
+  //     }
+  //     %14:
+  //     return
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %11 = OpLoad %float %x
+         %12 = OpFOrdLessThan %bool %11 %zero
+               OpSelectionMerge %14 None
+               OpBranchConditional %12 %13 %14
+         %13 = OpLabel
+               OpBranch %14
+         %14 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+  // Control flow divergence.
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(13));
+  EXPECT_EQ(12, divergence_->GetDivergenceSource(13));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(14));
+  // Value divergence.
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(0, divergence_->GetDivergenceSource(11));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(11, divergence_->GetDivergenceSource(12));
+}
+
+// Exercises all four divergence flow kinds (see pseudocode annotations).
+TEST_F(DivergenceTest, FlowTypesTest) {
+  // pseudocode:
+  //   %10:
+  //   %11 = load x
+  //   %12 = x < 0 // data -> data
+  //   if (%12) {
+  //     %13: // data -> control
+  //     if (true) {
+  //       %14: // control -> control
+  //     }
+  //     %15:
+  //     %16 = 0 + 0
+  //   } else {
+  //     %17:
+  //     %18 = 0 + 1
+  //   }
+  //   %19:
+  //   %20 = phi(%16 from %15, %18 from %17) // control -> data
+  //   return
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %11 = OpLoad %float %x
+         %12 = OpFOrdLessThan %bool %11 %zero
+               OpSelectionMerge %19 None
+               OpBranchConditional %12 %13 %17
+         %13 = OpLabel
+               OpSelectionMerge %15 None
+               OpBranchConditional %true %14 %15
+         %14 = OpLabel
+               OpBranch %15
+         %15 = OpLabel
+         %16 = OpFAdd %float %zero %zero
+               OpBranch %19
+         %17 = OpLabel
+         %18 = OpFAdd %float %zero %one
+               OpBranch %19
+         %19 = OpLabel
+         %20 = OpPhi %float %16 %15 %18 %17
+               OpReturn
+               OpFunctionEnd
+  )"));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(0, divergence_->GetDivergenceSource(11));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(11, divergence_->GetDivergenceSource(12));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(13));
+  EXPECT_EQ(12, divergence_->GetDivergenceSource(13));
+  EXPECT_EQ(10, divergence_->GetDivergenceDependenceSource(13));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(14));
+  EXPECT_EQ(13, divergence_->GetDivergenceSource(14));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(15));
+  EXPECT_EQ(12, divergence_->GetDivergenceSource(15));
+  EXPECT_EQ(10, divergence_->GetDivergenceDependenceSource(15));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(16));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(17));
+  EXPECT_EQ(12, divergence_->GetDivergenceSource(17));
+  EXPECT_EQ(10, divergence_->GetDivergenceDependenceSource(17));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(18));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(19));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(20));
+  // The phi's divergence source is its divergent predecessor block (control
+  // -> data), which can be either %15 or %17. GetDivergenceDependenceSource
+  // is only defined for data -> control edges and is always 0 for a phi
+  // result, so both disjuncts (and the failure message) must query
+  // GetDivergenceSource.
+  EXPECT_TRUE(divergence_->GetDivergenceSource(20) == 15 ||
+              divergence_->GetDivergenceSource(20) == 17)
+      << "Got: " << divergence_->GetDivergenceSource(20);
+}
+
+// Checks that blocks after the structured if (but before the continue
+// target) remain control-dependent on the divergent branch, while the loop
+// header and merge stay uniform because both paths continue.
+TEST_F(DivergenceTest, ExitDependenceTest) {
+  // pseudocode:
+  //   %10:
+  //   %11 = load x
+  //   %12 = %11 < 0
+  //   %13:
+  //   do {
+  //     %14:
+  //     if (%12) {
+  //       %15:
+  //       continue;
+  //     }
+  //     %16:
+  //     %17:
+  //     continue;
+  //   } %18: while(false);
+  //   %19:
+  //   return
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %11 = OpLoad %float %x
+         %12 = OpFOrdLessThan %bool %11 %zero ; data -> data
+               OpBranch %13
+         %13 = OpLabel
+               OpLoopMerge %19 %18 None
+               OpBranch %14
+         %14 = OpLabel
+               OpSelectionMerge %16 None
+               OpBranchConditional %12 %15 %16
+         %15 = OpLabel
+               OpBranch %18  ; continue
+         %16 = OpLabel
+               OpBranch %17
+         %17 = OpLabel
+               OpBranch %18  ; continue
+         %18 = OpLabel
+               OpBranchConditional %false %13 %19
+         %19 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(0, divergence_->GetDivergenceSource(11));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(11, divergence_->GetDivergenceSource(12));
+
+  // Since both branches continue, there's no divergent control dependence
+  // to 13.
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(13));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(14));
+
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(15));
+  EXPECT_EQ(12, divergence_->GetDivergenceSource(15));
+  EXPECT_EQ(14, divergence_->GetDivergenceDependenceSource(15));
+
+  // These two blocks are outside the if but are still control dependent.
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(16));
+  EXPECT_EQ(12, divergence_->GetDivergenceSource(16));
+  EXPECT_EQ(14, divergence_->GetDivergenceDependenceSource(16));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(17));
+  EXPECT_EQ(12, divergence_->GetDivergenceSource(17));
+  EXPECT_EQ(14, divergence_->GetDivergenceDependenceSource(17));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(18));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(19));
+}
+
+// Checks promotion of partially-uniform control to divergent after the inner
+// merge (%17 onward): reconvergence within a derivative group cannot be
+// assumed when the enclosing control flow is only partially uniform.
+TEST_F(DivergenceTest, ReconvergencePromotionTest) {
+  // pseudocode:
+  // %10:
+  // %11 = load y
+  // %12 = %11 < 0
+  // if (%12) {
+  //   %13:
+  //   %14:
+  //   %15:
+  //   if (true) {
+  //     %16:
+  //   }
+  //   // Reconvergence *not* guaranteed as
+  //   // control is not uniform on the IG level
+  //   // at %15.
+  //   %17:
+  //   %18:
+  //   %19:
+  //   %20 = load y
+  // }
+  // %21:
+  // %22 = phi(%11, %20)
+  // return
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %11 = OpLoad %float %y
+         %12 = OpFOrdLessThan %bool %11 %zero
+               OpSelectionMerge %21 None
+               OpBranchConditional %12 %13 %21
+         %13 = OpLabel
+               OpBranch %14
+         %14 = OpLabel
+               OpBranch %15
+         %15 = OpLabel
+               OpSelectionMerge %17 None
+               OpBranchConditional %true %16 %17
+         %16 = OpLabel
+               OpBranch %17
+         %17 = OpLabel
+               OpBranch %18
+         %18 = OpLabel
+               OpBranch %19
+         %19 = OpLabel
+         %20 = OpLoad %float %y
+               OpBranch %21
+         %21 = OpLabel
+         %22 = OpPhi %float %11 %10 %20 %19
+               OpReturn
+               OpFunctionEnd
+  )"));
+  ASSERT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  ASSERT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(21));
+
+  // Loads of the Flat-decorated %y are partially uniform.
+  ASSERT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(11));
+  ASSERT_EQ(0, divergence_->GetDivergenceSource(11));
+  ASSERT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(12));
+  ASSERT_EQ(11, divergence_->GetDivergenceSource(12));
+  ASSERT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(13));
+  ASSERT_EQ(12, divergence_->GetDivergenceSource(13));
+  ASSERT_EQ(10, divergence_->GetDivergenceDependenceSource(13));
+  ASSERT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(14));
+  ASSERT_EQ(12, divergence_->GetDivergenceSource(14));
+  ASSERT_EQ(10, divergence_->GetDivergenceDependenceSource(14));
+  ASSERT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(15));
+  ASSERT_EQ(12, divergence_->GetDivergenceSource(15));
+  ASSERT_EQ(10, divergence_->GetDivergenceDependenceSource(15));
+  ASSERT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(16));
+  ASSERT_EQ(15, divergence_->GetDivergenceSource(16));
+
+  // Blocks after the inner merge are promoted to divergent.
+  ASSERT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(17));
+  ASSERT_EQ(12, divergence_->GetDivergenceSource(17));
+  ASSERT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(18));
+  ASSERT_EQ(12, divergence_->GetDivergenceSource(18));
+  ASSERT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(19));
+  ASSERT_EQ(12, divergence_->GetDivergenceSource(19));
+
+  ASSERT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(20));
+  ASSERT_EQ(0, divergence_->GetDivergenceSource(20));
+  ASSERT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(22));
+  ASSERT_EQ(19, divergence_->GetDivergenceSource(22));
+  // NOTE(review): this duplicates the %15 check above; possibly intended to
+  // query a different id (e.g. 22) — confirm.
+  ASSERT_EQ(10, divergence_->GetDivergenceDependenceSource(15));
+}
+
+// Documents the current per-function limitation: divergence caused inside a
+// called function (multiple returns) is not propagated to the caller.
+TEST_F(DivergenceTest, FunctionCallTest) {
+  // pseudocode:
+  // %2() {
+  //   %20:
+  //   %21 = load x
+  //   %22 = %21 < 0
+  //   if (%22) {
+  //     %23:
+  //     return
+  //   }
+  //   %24:
+  //   return
+  // }
+  //
+  // main() {
+  //   %10:
+  //   %11 = %2();
+  //   // Reconvergence *not* guaranteed.
+  //   %12:
+  //   return
+  // }
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %11 = OpFunctionCall %void %2
+               OpBranch %12
+         %12 = OpLabel
+               OpReturn
+               OpFunctionEnd
+
+          %2 = OpFunction %void None %void_f
+         %20 = OpLabel
+         %21 = OpLoad %float %x
+         %22 = OpFOrdLessThan %bool %21 %zero
+               OpSelectionMerge %24 None
+               OpBranchConditional %22 %23 %24
+         %23 = OpLabel
+               OpReturn
+         %24 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  // Conservatively assume function return value is uniform.
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(11));
+  // TODO(dongja): blocks reachable from diverging function calls should be
+  // divergent.
+  // EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(12));  // Wrong!
+}
+
+TEST_F(DivergenceTest, LateMergeTest) {
+  // pseudocode:
+  // %10:
+  // %11 = load x
+  // %12 = %11 < 0
+  // [merge: %15]
+  // if (%12) {
+  //   %13:
+  // }
+  // %14: // Reconvergence hasn't happened by here.
+  // %15:
+  // return
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %11 = OpLoad %float %x
+         %12 = OpFOrdLessThan %bool %11 %zero
+               OpSelectionMerge %15 None
+               OpBranchConditional %12 %13 %14
+         %13 = OpLabel
+               OpBranch %14
+         %14 = OpLabel
+               OpBranch %15
+         %15 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(13));
+  // TODO(dongja):
+  // EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(14));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(14));  // Wrong!
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(15));
+}
+
+// The following series of tests makes sure that the analysis converges to the
+TEST_F(DivergenceTest, UniformFixpointTest) {
+  // pseudocode (trip count is a uniform constant, so everything stays uniform):
+  //   %10:
+  //   %20 = load x
+  //   %21 = load y
+  //   do {
+  //     %11:
+  //     %12:
+  //     %13 = phi(%zero from %11, %14 from %16)
+  //     %14 = %13 + 1
+  //     %15 = %13 < 1
+  //   } %16: while (%15)
+  //   %17:
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %20 = OpLoad %float %x
+         %21 = OpLoad %float %y
+               OpBranch %11
+         %11 = OpLabel
+         %13 = OpPhi %float %zero %10 %14 %16
+               OpLoopMerge %17 %16 None
+               OpBranch %12
+         %12 = OpLabel
+         %14 = OpFAdd %float %13 %one
+         %15 = OpFOrdLessThan %bool %13 %one
+               OpBranch %16
+         %16 = OpLabel
+               OpBranchConditional %15 %11 %17
+         %17 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(13));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(14));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(15));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(16));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(17));
+}
+
+TEST_F(DivergenceTest, PartiallyUniformFixpointTest) {
+  // pseudocode (loop condition %15 depends on y, which is partially uniform):
+  //   %10:
+  //   %20 = load x
+  //   %21 = load y
+  //   do {
+  //     %11:
+  //     %12:
+  //     %13 = phi(%zero from %11, %14 from %16)
+  //     %14 = %13 + 1
+  //     %15 = %13 < %21
+  //   } %16: while (%15)
+  //   %17:
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %20 = OpLoad %float %x
+         %21 = OpLoad %float %y
+               OpBranch %11
+         %11 = OpLabel
+         %13 = OpPhi %float %zero %10 %14 %16
+               OpLoopMerge %17 %16 None
+               OpBranch %12
+         %12 = OpLabel
+         %14 = OpFAdd %float %13 %one
+         %15 = OpFOrdLessThan %bool %13 %21
+               OpBranch %16
+         %16 = OpLabel
+               OpBranchConditional %15 %11 %17
+         %17 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  EXPECT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(13));
+  EXPECT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(14));
+  EXPECT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(15));
+  EXPECT_EQ(Level::kPartiallyUniform, divergence_->GetDivergenceLevel(16));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(17));
+}
+
+TEST_F(DivergenceTest, DivergentFixpointTest) {
+  // pseudocode (loop condition %15 depends on x, which is divergent):
+  //   %10:
+  //   %20 = load x
+  //   %21 = load y
+  //   do {
+  //     %11:
+  //     %12:
+  //     %13 = phi(%zero from %11, %14 from %16)
+  //     %14 = %13 + 1
+  //     %15 = %13 < %20
+  //   } %16: while (%15)
+  //   %17:
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %20 = OpLoad %float %x
+         %21 = OpLoad %float %y
+               OpBranch %11
+         %11 = OpLabel
+         %13 = OpPhi %float %zero %10 %14 %16
+               OpLoopMerge %17 %16 None
+               OpBranch %12
+         %12 = OpLabel
+         %14 = OpFAdd %float %13 %one
+         %15 = OpFOrdLessThan %bool %13 %20
+               OpBranch %16
+         %16 = OpLabel
+               OpBranchConditional %15 %11 %17
+         %17 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(13));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(14));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(15));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(16));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(17));
+}
+
+TEST_F(DivergenceTest, DivergentOverridesPartiallyUniformTest) {
+  // pseudocode (%13 is seeded with partially uniform %21; divergence wins):
+  //   %10:
+  //   %20 = load x
+  //   %21 = load y
+  //   %11:
+  //   do {
+  //     %12:
+  //     %13 = phi(%21 from %11, %14 from %16)
+  //     %14 = %13 + 1
+  //     %15 = %13 < %20
+  //   } %16: while (%15)
+  //   %17:
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %20 = OpLoad %float %x
+         %21 = OpLoad %float %y
+               OpBranch %11
+         %11 = OpLabel
+         %13 = OpPhi %float %21 %10 %14 %16
+               OpLoopMerge %17 %16 None
+               OpBranch %12
+         %12 = OpLabel
+         %14 = OpFAdd %float %13 %one
+         %15 = OpFOrdLessThan %bool %13 %20
+               OpBranch %16
+         %16 = OpLabel
+               OpBranchConditional %15 %11 %17
+         %17 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(13));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(14));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(15));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(16));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(17));
+}
+
+TEST_F(DivergenceTest, NestedFixpointTest) {
+  // pseudocode:
+  //   %10:
+  //   %20 = load x
+  //   %21 = load y
+  //   do {
+  //     %22:
+  //     %23:
+  //     %24 = phi(%zero from %22, %25 from %26)
+  //     %11:
+  //     do {
+  //       %12:
+  //       %13 = phi(%zero from %11, %14 from %16)
+  //       %14 = %13 + 1
+  //       %15 = %13 < %24
+  //     } %16: while (%15)
+  //     %17:
+  //     %25 = load x
+  //   } %26: while (false)
+  //   %27:
+  //   return
+  ASSERT_NO_FATAL_FAILURE(Build(Preamble() + R"(
+         %10 = OpLabel
+         %20 = OpLoad %float %x
+         %21 = OpLoad %float %y
+               OpBranch %22
+         %22 = OpLabel
+         %24 = OpPhi %float %zero %10 %25 %26
+               OpLoopMerge %27 %26 None
+               OpBranch %23
+         %23 = OpLabel
+               OpBranch %11
+         %11 = OpLabel
+         %13 = OpPhi %float %zero %23 %14 %16
+               OpLoopMerge %17 %16 None
+               OpBranch %12
+         %12 = OpLabel
+         %14 = OpFAdd %float %13 %one
+         %15 = OpFOrdLessThan %bool %13 %24
+               OpBranch %16
+         %16 = OpLabel
+               OpBranchConditional %15 %11 %17
+         %17 = OpLabel
+         %25 = OpLoad %float %x
+               OpBranch %26
+         %26 = OpLabel
+               OpBranchConditional %false %22 %27
+         %27 = OpLabel
+               OpReturn
+               OpFunctionEnd
+  )"));
+  // This test makes sure that divergent values flowing upward can influence the
+  // fixpoint of a loop.
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(10));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(11));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(12));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(13));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(14));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(15));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(16));
+  // Control of the outer loop is still uniform.
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(17));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(22));
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(23));
+  // %25 reloads divergent x and feeds the %24 phi, so both become divergent.
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(24));
+  EXPECT_EQ(Level::kDivergent, divergence_->GetDivergenceLevel(25));
+  // Outer loop control.
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(26));
+  // Merged.
+  EXPECT_EQ(Level::kUniform, divergence_->GetDivergenceLevel(27));
+}
+
+}  // namespace
+}  // namespace lint
+}  // namespace spvtools
diff --git a/test/lint/placeholder_test.cpp b/test/lint/placeholder_test.cpp
deleted file mode 100644
index a2ebec2..0000000
--- a/test/lint/placeholder_test.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2021 Google LLC.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "gtest/gtest.h"
-
-namespace spvtools {
-namespace lint {
-namespace {
-
-TEST(PlaceholderTest, PlaceholderTest) { ASSERT_TRUE(true); }
-
-}  // namespace
-}  // namespace lint
-}  // namespace spvtools