Revert "[flang][cuda] Avoid false positive on multi device symbol with components" (#192393)

Reverts llvm/llvm-project#192177

This change breaks some downstream testing, so it is being reverted while the issue is investigated.
diff --git a/flang/include/flang/Evaluate/tools.h b/flang/include/flang/Evaluate/tools.h
index a1c66e8..cab6ddb 100644
--- a/flang/include/flang/Evaluate/tools.h
+++ b/flang/include/flang/Evaluate/tools.h
@@ -1299,13 +1299,6 @@
     const std::optional<ActualArgument> &, const std::string &procName,
     const std::string &argName);
 
-// Get the symbol vectors of the expression where symbols are grouped together
-// if they are part of the same component expression.
-//
-// Example: a%b + c%d
-// Will be grouped as: [(a, b), (c, d)]
-std::vector<SymbolVector> GetSymbolVectors(const Expr<SomeType> &expr);
-
 bool IsCUDADeviceSymbol(const Symbol &sym);
 bool IsCUDADeviceOnlySymbol(const Symbol &sym);
 
@@ -1355,10 +1348,6 @@
   return symbols.size();
 }
 
-// Get the number of unique symbols with CUDA device attribute.
-int GetNbOfUniqueCUDADeviceSymbols(
-    const std::vector<SymbolVector> &symbolVectors);
-
 // Get the number of distinct symbols with CUDA managed or unified
 // attribute in the expression.
 template <typename A>
diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp
index c0bd897..8aff09a 100644
--- a/flang/lib/Evaluate/tools.cpp
+++ b/flang/lib/Evaluate/tools.cpp
@@ -1130,40 +1130,6 @@
 template semantics::UnorderedSymbolSet CollectCudaSymbols(
     const Expr<SubscriptInteger> &);
 
-std::vector<SymbolVector> GetSymbolVectors(const Expr<SomeType> &expr) {
-  SymbolVector symbols{GetSymbolVector(expr)};
-  std::reverse(symbols.begin(), symbols.end());
-
-  std::vector<SymbolVector> symbolVectors;
-
-  SymbolVector crtSymbols;
-  for (const Symbol &sym : symbols) {
-    bool isComponent{sym.owner().IsDerivedType()};
-    if (isComponent) {
-      crtSymbols.push_back(sym);
-    } else {
-      crtSymbols.push_back(sym);
-      symbolVectors.push_back(crtSymbols);
-      crtSymbols.clear();
-    }
-  }
-  return symbolVectors;
-}
-
-int GetNbOfUniqueCUDADeviceSymbols(
-    const std::vector<SymbolVector> &symbolVectors) {
-  semantics::UnorderedSymbolSet symbols;
-  for (const auto &symbolVector : symbolVectors) {
-    for (const auto &sym : symbolVector) {
-      if (IsCUDADeviceSymbol(*sym)) {
-        symbols.insert(sym);
-        break;
-      }
-    }
-  }
-  return symbols.size();
-}
-
 bool HasCUDAImplicitTransfer(const Expr<SomeType> &expr) {
   semantics::UnorderedSymbolSet hostSymbols;
   semantics::UnorderedSymbolSet deviceSymbols;
diff --git a/flang/lib/Semantics/check-cuda.cpp b/flang/lib/Semantics/check-cuda.cpp
index 1ec233a..62ece9e 100644
--- a/flang/lib/Semantics/check-cuda.cpp
+++ b/flang/lib/Semantics/check-cuda.cpp
@@ -795,8 +795,7 @@
   }
 
   int nbLhs{evaluate::GetNbOfCUDADeviceSymbols(assign->lhs)};
-  int nbRhs{evaluate::GetNbOfUniqueCUDADeviceSymbols(
-      evaluate::GetSymbolVectors(assign->rhs))};
+  int nbRhs{evaluate::GetNbOfCUDADeviceSymbols(assign->rhs)};
   int nbRhsManaged{evaluate::GetNbOfCUDAManagedOrUnifiedSymbols(assign->rhs)};
 
   // device to host transfer with more than one device object on the rhs is not
diff --git a/flang/test/Lower/CUDA/cuda-data-transfer.cuf b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
index e510ce2..1d0e510 100644
--- a/flang/test/Lower/CUDA/cuda-data-transfer.cuf
+++ b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
@@ -673,29 +673,3 @@
 
 ! CHECK-LABEL: func.func @_QPsub37()
 ! CHECK: cuf.data_transfer
-
-subroutine sub38()
-  type :: bar
-    integer, device, allocatable :: m(:)
-  end type
-  type(bar), unified :: a
-  type(bar), managed :: ma
-  integer :: lm(5)
-
-
-  lm(1:5) = a%m(1:5)
-  lm(1:5) = ma%m(1:5)
-end subroutine
-
-subroutine sub39()
-  type :: foo
-    integer, device, allocatable :: m(:)
-  end type
-  type :: bar
-    type(foo) :: f
-  end type
-  type(bar) :: a
-  integer :: lm(5)
-
-  lm(1:5) = a%f%m(1:5)
-end subroutine