Add test for importing AHardwareBuffers in and out of Vulkan.

The general test creates an AHardwareBuffer. Imports it into a backend to fill with data.
And then imports it into another backend to read the HWB and draws it to a surface.
We then readback the pixels to make sure they match what we started with.

This version of the test tests all combinations of srcBackends (CPU, EGL, Vulkan) with the
dst backends (EGL, Vulkan).

Also cherry-picks in changes:
Add ctor to GrVkAlloc (https://skia-review.googlesource.com/111660)
Add testing only helper to flush and sync gpu (https://skia-review.googlesource.com/113122)

Bug: skia:
Change-Id: I732f371af7f48f5a590fd161021a69cbc6f2583c
Reviewed-on: https://skia-review.googlesource.com/117080
Reviewed-by: Jim Van Verth <jvanverth@google.com>
diff --git a/BUILD.gn b/BUILD.gn
index 75db39a..3d52019 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1794,9 +1794,8 @@
   if (!is_win && skia_enable_gpu) {
     test_lib("skqp_lib") {
       public_include_dirs = [ "tools/skqp" ]
-      defines = [
-        "SK_SKQP_GLOBAL_ERROR_TOLERANCE=$skia_skqp_global_error_tolerance"
-      ]
+      defines =
+          [ "SK_SKQP_GLOBAL_ERROR_TOLERANCE=$skia_skqp_global_error_tolerance" ]
       if (skia_skqp_enable_driver_correctness_workarounds) {
         defines += [ "SK_SKQP_ENABLE_DRIVER_CORRECTNESS_WORKAROUNDS" ]
       }
diff --git a/gn/tests.gni b/gn/tests.gni
index b218c5c..66b6637 100644
--- a/gn/tests.gni
+++ b/gn/tests.gni
@@ -274,6 +274,7 @@
   "$_tests/UtilsTest.cpp",
   "$_tests/VerticesTest.cpp",
   "$_tests/VkClearTests.cpp",
+  "$_tests/VkHardwareBufferTest.cpp",
   "$_tests/VkHeapTests.cpp",
   "$_tests/VkMakeCopyPipelineTest.cpp",
   "$_tests/VkUploadPixelsTests.cpp",
diff --git a/include/gpu/vk/GrVkTypes.h b/include/gpu/vk/GrVkTypes.h
index 5e93733..e0370cd 100644
--- a/include/gpu/vk/GrVkTypes.h
+++ b/include/gpu/vk/GrVkTypes.h
@@ -31,19 +31,32 @@
  * Vulkan textures are really const GrVkImageInfo*
  */
 struct GrVkAlloc {
-    VkDeviceMemory fMemory = VK_NULL_HANDLE;  // can be VK_NULL_HANDLE iff is an RT and is borrowed
-    VkDeviceSize   fOffset = 0;
-    VkDeviceSize   fSize = 0;    // this can be indeterminate iff Tex uses borrow semantics
-    uint32_t       fFlags= 0;
+    GrVkAlloc()
+            : fMemory(VK_NULL_HANDLE)
+            , fOffset(0)
+            , fSize(0)
+            , fFlags(0)
+            , fUsesSystemHeap(false) {}
+
+    GrVkAlloc(VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, uint32_t flags)
+            : fMemory(memory)
+            , fOffset(offset)
+            , fSize(size)
+            , fFlags(flags)
+            , fUsesSystemHeap(false) {}
+
+    VkDeviceMemory fMemory;  // can be VK_NULL_HANDLE iff is an RT and is borrowed
+    VkDeviceSize   fOffset;
+    VkDeviceSize   fSize;    // this can be indeterminate iff Tex uses borrow semantics
+    uint32_t       fFlags;
 
     enum Flag {
         kNoncoherent_Flag = 0x1,   // memory must be flushed to device after mapping
     };
 private:
     friend class GrVkHeap; // For access to usesSystemHeap
-    bool fUsesSystemHeap = false;
+    bool fUsesSystemHeap;
 };
-
 struct GrVkImageInfo {
     /**
      * If the image's format is sRGB (GrVkFormatIsSRGB returns true), then the image must have
@@ -55,6 +68,8 @@
     VkImageLayout  fImageLayout;
     VkFormat       fFormat;
     uint32_t       fLevelCount;
+    uint32_t       fInitialQueueFamily = VK_QUEUE_FAMILY_IGNORED;
+    uint32_t       fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
 
     // This gives a way for a client to update the layout of the Image if they change the layout
     // while we're still holding onto the wrapped texture. They will first need to get a handle
diff --git a/platform_tools/android/apps/skqp/src/main/assets/skqp/KnownGpuUnitTests.txt b/platform_tools/android/apps/skqp/src/main/assets/skqp/KnownGpuUnitTests.txt
index 064fdc0..c48f6cc 100644
--- a/platform_tools/android/apps/skqp/src/main/assets/skqp/KnownGpuUnitTests.txt
+++ b/platform_tools/android/apps/skqp/src/main/assets/skqp/KnownGpuUnitTests.txt
@@ -137,6 +137,12 @@
 VkMakeCopyPipelineTest
 VkUploadPixelsTests
 VkWrapTests
+VulkanHardwareBuffer_CPU_Vulkan
+VulkanHardwareBuffer_EGL_Vulkan
+VulkanHardwareBuffer_Vulkan_Vulkan
+VulkanHardwareBuffer_CPU_EGL
+VulkanHardwareBuffer_EGL_EGL
+VulkanHardwareBuffer_Vulkan_EGL
 WrappedProxyConversionTest
 WrappedProxyTest
 WritePixelsNonTexture_Gpu
diff --git a/src/gpu/GrAHardwareBufferImageGenerator.cpp b/src/gpu/GrAHardwareBufferImageGenerator.cpp
index 085d27a..b95e394 100644
--- a/src/gpu/GrAHardwareBufferImageGenerator.cpp
+++ b/src/gpu/GrAHardwareBufferImageGenerator.cpp
@@ -162,7 +162,7 @@
 
     while (GL_NO_ERROR != glGetError()) {} //clear GL errors
 
-    EGLClientBuffer  clientBuffer = eglGetNativeClientBufferANDROID(fGraphicBuffer);
+    EGLClientBuffer clientBuffer = eglGetNativeClientBufferANDROID(fGraphicBuffer);
     EGLint attribs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
                          EGL_NONE };
     EGLDisplay display = eglGetCurrentDisplay();
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index 30e565f..f0c8c18 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -470,6 +470,12 @@
     virtual void deleteTestingOnlyBackendTexture(GrBackendTexture*,
                                                  bool abandonTexture = false) = 0;
 
+    /**
+     * Flushes all work to the gpu and forces the GPU to wait until all the gpu work has completed.
+     * This is for testing purposes only.
+     */
+    virtual void testingOnly_flushGpuAndSync() = 0;
+
     // width and height may be larger than rt (if underlying API allows it).
     // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
     // the GrStencilAttachment.
diff --git a/src/gpu/ddl/GrDDLGpu.h b/src/gpu/ddl/GrDDLGpu.h
index 0508a2e..25f647d 100644
--- a/src/gpu/ddl/GrDDLGpu.h
+++ b/src/gpu/ddl/GrDDLGpu.h
@@ -170,6 +170,8 @@
     bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
     void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandon = false) override;
 
+    void testingOnly_flushGpuAndSync() override {}
+
     typedef GrGpu INHERITED;
 };
 
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index fb0622b..933251e 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -4498,6 +4498,10 @@
     fProgramCache->abandon();
 }
 
+void GrGLGpu::testingOnly_flushGpuAndSync() {
+    GL_CALL(Finish());
+}
+
 ///////////////////////////////////////////////////////////////////////////////
 
 GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 0d55e50..285b9d3 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -168,6 +168,8 @@
 
     void resetShaderCacheForTesting() const override;
 
+    void testingOnly_flushGpuAndSync() override;
+
     GrFence SK_WARN_UNUSED_RESULT insertFence() override;
     bool waitFence(GrFence, uint64_t timeout) override;
     void deleteFence(GrFence) const override;
diff --git a/src/gpu/mock/GrMockGpu.h b/src/gpu/mock/GrMockGpu.h
index b99857f..ff41c27 100644
--- a/src/gpu/mock/GrMockGpu.h
+++ b/src/gpu/mock/GrMockGpu.h
@@ -137,6 +137,8 @@
     bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
     void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandonTexture = false) override;
 
+    void testingOnly_flushGpuAndSync() override {}
+
     static int NextInternalTextureID();
     static int NextExternalTextureID();
 
diff --git a/src/gpu/mtl/GrMtlGpu.h b/src/gpu/mtl/GrMtlGpu.h
index 9199f03..8ae71b9 100644
--- a/src/gpu/mtl/GrMtlGpu.h
+++ b/src/gpu/mtl/GrMtlGpu.h
@@ -151,6 +151,8 @@
     bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override { return false; }
     void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandon = false) override {}
 
+    void testingOnly_flushGpuAndSync() override {}
+
     sk_sp<GrMtlCaps> fMtlCaps;
 
     id<MTLDevice> fDevice;
diff --git a/src/gpu/vk/GrVkExtensions.cpp b/src/gpu/vk/GrVkExtensions.cpp
index 17737f1..31cc986 100644
--- a/src/gpu/vk/GrVkExtensions.cpp
+++ b/src/gpu/vk/GrVkExtensions.cpp
@@ -89,9 +89,7 @@
         return false;
     }
     for (uint32_t i = 0; i < extensionCount; ++i) {
-        if (nonPatchVersion >= remove_patch_version(extensions[i].specVersion)) {
-            fInstanceExtensionStrings->push_back() = extensions[i].extensionName;
-        }
+        fInstanceExtensionStrings->push_back() = extensions[i].extensionName;
     }
     delete [] extensions;
     // sort so we can search
@@ -183,9 +181,7 @@
         return false;
     }
     for (uint32_t i = 0; i < extensionCount; ++i) {
-        if (nonPatchVersion >= remove_patch_version(extensions[i].specVersion)) {
-            fDeviceExtensionStrings->push_back() = extensions[i].extensionName;
-        }
+        fDeviceExtensionStrings->push_back() = extensions[i].extensionName;
     }
     delete[] extensions;
     if (!fDeviceExtensionStrings->empty()) {
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 1269eaa..2433039 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -1575,6 +1575,10 @@
     }
 }
 
+void GrVkGpu::testingOnly_flushGpuAndSync() {
+    this->submitCommandBuffer(kForce_SyncQueue);
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 85fb213..53701bf 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -50,6 +50,7 @@
 
     VkDevice device() const { return fDevice; }
     VkQueue  queue() const { return fQueue; }
+    uint32_t  queueIndex() const { return fBackendContext->fGraphicsQueueIndex; }
     VkCommandPool cmdPool() const { return fCmdPool; }
     VkPhysicalDeviceProperties physicalDeviceProperties() const {
         return fPhysDevProps;
@@ -89,6 +90,8 @@
     bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
     void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandonTexture = false) override;
 
+    void testingOnly_flushGpuAndSync() override;
+
     GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                 int width,
                                                                 int height) override;
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
index 5e0ffe2..71e3682 100644
--- a/src/gpu/vk/GrVkImage.cpp
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -28,14 +28,21 @@
 void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
                                VkAccessFlags dstAccessMask,
                                VkPipelineStageFlags dstStageMask,
-                               bool byRegion) {
+                               bool byRegion,
+                               bool releaseFamilyQueue) {
     SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
     VkImageLayout currentLayout = this->currentLayout();
 
+    if (releaseFamilyQueue && fInfo.fCurrentQueueFamily == fInfo.fInitialQueueFamily) {
+        // We never transferred the image to this queue and we are releasing it so don't do anything.
+        return;
+    }
+
     // If the old and new layout are the same and the layout is a read only layout, there is no need
     // to put in a barrier.
     if (newLayout == currentLayout &&
+        !releaseFamilyQueue &&
         (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
@@ -46,6 +53,19 @@
     VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(currentLayout);
 
     VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);
+
+    uint32_t srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    uint32_t dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    if (VK_QUEUE_FAMILY_IGNORED != fInfo.fCurrentQueueFamily) {
+        srcQueueFamilyIndex = fInfo.fInitialQueueFamily;
+        dstQueueFamilyIndex = gpu->queueIndex();
+        fInfo.fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
+    } else if (releaseFamilyQueue) {
+        srcQueueFamilyIndex = gpu->queueIndex();
+        dstQueueFamilyIndex = fInfo.fInitialQueueFamily;
+        fInfo.fCurrentQueueFamily = fInfo.fInitialQueueFamily;
+    }
+
     VkImageMemoryBarrier imageMemoryBarrier = {
         VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
         nullptr,                                         // pNext
@@ -53,8 +73,8 @@
         dstAccessMask,                                   // inputMask
         currentLayout,                                   // oldLayout
         newLayout,                                       // newLayout
-        VK_QUEUE_FAMILY_IGNORED,                         // srcQueueFamilyIndex
-        VK_QUEUE_FAMILY_IGNORED,                         // dstQueueFamilyIndex
+        srcQueueFamilyIndex,                             // srcQueueFamilyIndex
+        dstQueueFamilyIndex,                             // dstQueueFamilyIndex
         fInfo.fImage,                                    // image
         { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }      // subresourceRange
     };
@@ -139,6 +159,9 @@
 }
 
 void GrVkImage::releaseImage(const GrVkGpu* gpu) {
+    if (VK_QUEUE_FAMILY_IGNORED != fInfo.fInitialQueueFamily) {
+        this->setImageLayout(gpu, fInfo.fImageLayout, 0, 0, false, true);
+    }
     if (fResource) {
         fResource->unref(gpu);
         fResource = nullptr;
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
index 587c3a8..9adbc2b 100644
--- a/src/gpu/vk/GrVkImage.h
+++ b/src/gpu/vk/GrVkImage.h
@@ -50,7 +50,8 @@
                         VkImageLayout newLayout,
                         VkAccessFlags dstAccessMask,
                         VkPipelineStageFlags dstStageMask,
-                        bool byRegion);
+                        bool byRegion,
+                        bool releaseFamilyQueue = false);
 
     struct ImageDesc {
         VkImageType         fImageType;
diff --git a/src/sksl/SkSLParser.cpp b/src/sksl/SkSLParser.cpp
index 1b6690d..986d8c0 100644
--- a/src/sksl/SkSLParser.cpp
+++ b/src/sksl/SkSLParser.cpp
@@ -1449,6 +1449,7 @@
                 result = std::unique_ptr<ASTExpression>(new ASTBinaryExpression(std::move(result),
                                                                                 std::move(t),
                                                                                 std::move(right)));
+                return result;
             }
             default:
                 return result;
diff --git a/tests/GrMipMappedTest.cpp b/tests/GrMipMappedTest.cpp
index 0436bd4..d02095b 100644
--- a/tests/GrMipMappedTest.cpp
+++ b/tests/GrMipMappedTest.cpp
@@ -201,14 +201,9 @@
             }
 
             // Must make sure the uses of the backend texture have finished (we possibly have a
-            // queued up copy) before we delete the backend texture. Thus we use readPixels here
-            // just to force the synchronization.
-            sk_sp<GrSurfaceContext> surfContext =
-                    context->contextPriv().makeWrappedSurfaceContext(genProxy);
-
-            SkBitmap bitmap;
-            bitmap.allocPixels(imageInfo);
-            surfContext->readPixels(imageInfo, bitmap.getPixels(), 0, 0, 0, 0);
+            // queued up copy) before we delete the backend texture.
+            context->flush();
+            gpu->testingOnly_flushGpuAndSync();
 
             gpu->deleteTestingOnlyBackendTexture(&backendTex);
         }
diff --git a/tests/ImageTest.cpp b/tests/ImageTest.cpp
index a1472d4..689fc38 100644
--- a/tests/ImageTest.cpp
+++ b/tests/ImageTest.cpp
@@ -859,13 +859,9 @@
             otherTestContext->makeCurrent();
             canvas->flush();
 
-            // This readPixels call is needed for Vulkan to make sure the ReleaseProc is called.
-            // Even though we flushed above, this does not guarantee the command buffer will finish
-            // which is when we call the ReleaseProc. The readPixels forces a CPU sync so we know
-            // that the command buffer has finished and we've called the ReleaseProc.
-            SkBitmap bitmap;
-            bitmap.allocPixels(info);
-            canvas->readPixels(bitmap, 0, 0);
+            // This is specifically here for vulkan to guarantee the command buffer will finish
+            // which is when we call the ReleaseProc.
+            otherCtx->contextPriv().getGpu()->testingOnly_flushGpuAndSync();
         }
 
         // Case #6: Verify that only one context can be using the image at a time
diff --git a/tests/VkHardwareBufferTest.cpp b/tests/VkHardwareBufferTest.cpp
new file mode 100644
index 0000000..86f673e
--- /dev/null
+++ b/tests/VkHardwareBufferTest.cpp
@@ -0,0 +1,1245 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This is a GPU-backend specific test. It relies on static initializers to work
+
+#include "SkTypes.h"
+
+#ifdef SKQP_BUILD_HARDWAREBUFFER_TEST
+#if SK_SUPPORT_GPU && defined(SK_VULKAN)
+
+#include "GrContext.h"
+#include "GrContextFactory.h"
+#include "GrContextPriv.h"
+#include "GrGpu.h"
+#include "GrProxyProvider.h"
+#include "GrTest.h"
+#include "SkAutoMalloc.h"
+#include "SkCanvas.h"
+#include "SkGr.h"
+#include "SkImage.h"
+#include "SkSurface.h"
+#include "Test.h"
+#include "../tools/gpu/vk/VkTestUtils.h"
+#include "gl/GrGLDefines.h"
+#include "gl/GrGLUtil.h"
+#include "vk/GrVkBackendContext.h"
+#include "vk/GrVkExtensions.h"
+
+#include <android/hardware_buffer.h>
+#include <cinttypes>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+
+static const int DEV_W = 16, DEV_H = 16;
+
+class BaseTestHelper {
+public:
+    virtual ~BaseTestHelper() {}
+
+    virtual void cleanup() = 0;
+    virtual void releaseImage() = 0;
+
+    virtual sk_sp<SkImage> importHardwareBufferForRead(skiatest::Reporter* reporter,
+                                                       AHardwareBuffer* buffer) = 0;
+    virtual sk_sp<SkSurface> importHardwareBufferForWrite(skiatest::Reporter* reporter,
+                                                          AHardwareBuffer* buffer) = 0;
+
+    virtual void doClientSync() = 0;
+
+    virtual GrContext* grContext() = 0;
+
+protected:
+    BaseTestHelper() {}
+};
+
+class EGLTestHelper : public BaseTestHelper {
+public:
+    EGLTestHelper(const GrContextOptions& options) : fFactory(options) {}
+
+    ~EGLTestHelper() override {}
+
+    void releaseImage() override {
+        if (!fGLCtx) {
+            return;
+        }
+        if (EGL_NO_IMAGE_KHR != fImage) {
+            fGLCtx->destroyEGLImage(fImage);
+            fImage = EGL_NO_IMAGE_KHR;
+        }
+        if (fTexID) {
+            GR_GL_CALL(fGLCtx->gl(), DeleteTextures(1, &fTexID));
+            fTexID = 0;
+        }
+    }
+
+    void cleanup() override {
+        this->releaseImage();
+    }
+
+    bool init(skiatest::Reporter* reporter);
+
+    sk_sp<SkImage> importHardwareBufferForRead(skiatest::Reporter* reporter,
+                                               AHardwareBuffer* buffer) override;
+    sk_sp<SkSurface> importHardwareBufferForWrite(skiatest::Reporter* reporter,
+                                                  AHardwareBuffer* buffer) override;
+
+    void doClientSync() override;
+
+    GrContext* grContext() override { return fGrContext; }
+
+private:
+    bool importHardwareBuffer(skiatest::Reporter* reporter, AHardwareBuffer* buffer);
+
+    typedef EGLClientBuffer (*EGLGetNativeClientBufferANDROIDProc)(const struct AHardwareBuffer*);
+    typedef EGLImageKHR (*EGLCreateImageKHRProc)(EGLDisplay, EGLContext, EGLenum, EGLClientBuffer,
+                                                 const EGLint*);
+    typedef void (*EGLImageTargetTexture2DOESProc)(EGLenum, void*);
+    EGLGetNativeClientBufferANDROIDProc fEGLGetNativeClientBufferANDROID;
+    EGLCreateImageKHRProc fEGLCreateImageKHR;
+    EGLImageTargetTexture2DOESProc fEGLImageTargetTexture2DOES;
+
+    EGLImageKHR fImage = EGL_NO_IMAGE_KHR;
+    GrGLuint fTexID = 0;
+
+    sk_gpu_test::GrContextFactory fFactory;
+    sk_gpu_test::ContextInfo fGLESContextInfo;
+
+    sk_gpu_test::GLTestContext* fGLCtx = nullptr;
+    GrContext* fGrContext = nullptr;
+};
+
+bool EGLTestHelper::init(skiatest::Reporter* reporter) {
+    fGLESContextInfo = fFactory.getContextInfo(sk_gpu_test::GrContextFactory::kGLES_ContextType);
+    fGrContext = fGLESContextInfo.grContext();
+    fGLCtx = fGLESContextInfo.glContext();
+    if (!fGrContext || !fGLCtx) {
+        return false;
+    }
+
+    if (kGLES_GrGLStandard != fGLCtx->gl()->fStandard) {
+        return false;
+    }
+
+    // Confirm we have egl and the needed extensions
+    if (!fGLCtx->gl()->hasExtension("EGL_KHR_image") ||
+        !fGLCtx->gl()->hasExtension("EGL_ANDROID_get_native_client_buffer") ||
+        !fGLCtx->gl()->hasExtension("GL_OES_EGL_image_external") ||
+        !fGLCtx->gl()->hasExtension("GL_OES_EGL_image") ||
+        !fGLCtx->gl()->hasExtension("EGL_KHR_fence_sync")) {
+        return false;
+    }
+
+    fEGLGetNativeClientBufferANDROID =
+        (EGLGetNativeClientBufferANDROIDProc) eglGetProcAddress("eglGetNativeClientBufferANDROID");
+    if (!fEGLGetNativeClientBufferANDROID) {
+        ERRORF(reporter, "Failed to get the eglGetNativeClientBufferANDROID proc");
+        return false;
+    }
+
+    fEGLCreateImageKHR = (EGLCreateImageKHRProc) eglGetProcAddress("eglCreateImageKHR");
+    if (!fEGLCreateImageKHR) {
+        ERRORF(reporter, "Failed to get the proc eglCreateImageKHR");
+        return false;
+    }
+
+    fEGLImageTargetTexture2DOES =
+            (EGLImageTargetTexture2DOESProc) eglGetProcAddress("glEGLImageTargetTexture2DOES");
+    if (!fEGLImageTargetTexture2DOES) {
+        ERRORF(reporter, "Failed to get the proc EGLImageTargetTexture2DOES");
+        return false;
+    }
+    return true;
+}
+
+bool EGLTestHelper::importHardwareBuffer(skiatest::Reporter* reporter, AHardwareBuffer* buffer) {
+    GrGLClearErr(fGLCtx->gl());
+
+    EGLClientBuffer eglClientBuffer = fEGLGetNativeClientBufferANDROID(buffer);
+    EGLint eglAttribs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
+                            EGL_NONE };
+    EGLDisplay eglDisplay = eglGetCurrentDisplay();
+    fImage = fEGLCreateImageKHR(eglDisplay, EGL_NO_CONTEXT,
+                                EGL_NATIVE_BUFFER_ANDROID,
+                                eglClientBuffer, eglAttribs);
+    if (EGL_NO_IMAGE_KHR == fImage) {
+        SkDebugf("Could not create EGL image, err = (%#x)\n", (int) eglGetError() );
+        return false;
+    }
+
+    GR_GL_CALL(fGLCtx->gl(), GenTextures(1, &fTexID));
+    if (!fTexID) {
+        ERRORF(reporter, "Failed to create GL Texture");
+        return false;
+    }
+    GR_GL_CALL_NOERRCHECK(fGLCtx->gl(), BindTexture(GR_GL_TEXTURE_2D, fTexID));
+    if (GR_GL_GET_ERROR(fGLCtx->gl()) != GR_GL_NO_ERROR) {
+        ERRORF(reporter, "Failed to bind GL Texture");
+        return false;
+    }
+
+    fEGLImageTargetTexture2DOES(GL_TEXTURE_2D, fImage);
+    GLenum status = GL_NO_ERROR;
+    if ((status = glGetError()) != GL_NO_ERROR) {
+        ERRORF(reporter, "EGLImageTargetTexture2DOES failed (%#x)", (int) status);
+        return false;
+    }
+
+    fGrContext->resetContext(kTextureBinding_GrGLBackendState);
+    return true;
+}
+
+sk_sp<SkImage> EGLTestHelper::importHardwareBufferForRead(skiatest::Reporter* reporter,
+                                                          AHardwareBuffer* buffer) {
+    if (!this->importHardwareBuffer(reporter, buffer)) {
+        return nullptr;
+    }
+    GrGLTextureInfo textureInfo;
+    textureInfo.fTarget = GR_GL_TEXTURE_2D;
+    textureInfo.fID = fTexID;
+    textureInfo.fFormat = GR_GL_RGBA8;
+
+    GrBackendTexture backendTex(DEV_W, DEV_H, GrMipMapped::kNo, textureInfo);
+    REPORTER_ASSERT(reporter, backendTex.isValid());
+
+    sk_sp<SkImage> image = SkImage::MakeFromTexture(fGrContext,
+                                                               backendTex,
+                                                               kTopLeft_GrSurfaceOrigin,
+                                                               kRGBA_8888_SkColorType,
+                                                               kPremul_SkAlphaType,
+                                                               nullptr);
+
+    if (!image) {
+        ERRORF(reporter, "Failed to make wrapped GL SkImage");
+        return nullptr;
+    }
+
+    return image;
+}
+
+sk_sp<SkSurface> EGLTestHelper::importHardwareBufferForWrite(skiatest::Reporter* reporter,
+                                                             AHardwareBuffer* buffer) {
+    if (!this->importHardwareBuffer(reporter, buffer)) {
+        return nullptr;
+    }
+    GrGLTextureInfo textureInfo;
+    textureInfo.fTarget = GR_GL_TEXTURE_2D;
+    textureInfo.fID = fTexID;
+    textureInfo.fFormat = GR_GL_RGBA8;
+
+    GrBackendTexture backendTex(DEV_W, DEV_H, GrMipMapped::kNo, textureInfo);
+    REPORTER_ASSERT(reporter, backendTex.isValid());
+
+    sk_sp<SkSurface> surface = SkSurface::MakeFromBackendTexture(fGrContext,
+                                                                 backendTex,
+                                                                 kTopLeft_GrSurfaceOrigin,
+                                                                 1,
+                                                                 kRGBA_8888_SkColorType,
+                                                                 nullptr, nullptr);
+
+    if (!surface) {
+        ERRORF(reporter, "Failed to make wrapped GL SkSurface");
+        return nullptr;
+    }
+
+    return surface;
+}
+
+void EGLTestHelper::doClientSync() {
+    sk_gpu_test::FenceSync* fenceSync = fGLCtx->fenceSync();
+    sk_gpu_test::PlatformFence fence = fenceSync->insertFence();
+    fenceSync->waitFence(fence);
+    fenceSync->deleteFence(fence);
+}
+
+#define DECLARE_VK_PROC(name) PFN_vk##name fVk##name
+
+#define ACQUIRE_VK_PROC(name, instance, device)                                        \
+    fVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
+    if (fVk##name == nullptr) {                                                        \
+        ERRORF(reporter, "Function ptr for vk%s could not be acquired\n", #name);      \
+        return false;                                                                  \
+    }
+
+#define ACQUIRE_INST_VK_PROC(name)                                                          \
+    fVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, fInst, VK_NULL_HANDLE)); \
+    if (fVk##name == nullptr) {                                                             \
+        ERRORF(reporter, "Function ptr for vk%s could not be acquired\n", #name);           \
+        fVkDestroyInstance(fInst, nullptr);                                                 \
+        return false;                                                                       \
+    }
+
+#define ACQUIRE_DEVICE_VK_PROC(name)                                                          \
+    fVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, VK_NULL_HANDLE, fDevice)); \
+    if (fVk##name == nullptr) {                                                               \
+        ERRORF(reporter, "Function ptr for vk%s could not be acquired\n", #name);             \
+        fVkDestroyDevice(fDevice, nullptr);                                                   \
+        fVkDestroyInstance(fInst, nullptr);                                                   \
+        return false;                                                                         \
+    }
+
+#ifdef SK_ENABLE_VK_LAYERS
+const char* kMyDebugLayerNames[] = {
+    // elements of VK_LAYER_LUNARG_standard_validation
+    "VK_LAYER_GOOGLE_threading",
+    "VK_LAYER_LUNARG_parameter_validation",
+    "VK_LAYER_LUNARG_object_tracker",
+    "VK_LAYER_LUNARG_image",
+    "VK_LAYER_LUNARG_core_validation",
+    "VK_LAYER_LUNARG_swapchain",
+    "VK_LAYER_GOOGLE_unique_objects",
+    // not included in standard_validation
+    //"VK_LAYER_LUNARG_api_dump",
+    //"VK_LAYER_LUNARG_vktrace",
+    //"VK_LAYER_LUNARG_screenshot",
+};
+#endif
+
+class VulkanTestHelper : public BaseTestHelper {
+public:
+    VulkanTestHelper() {}
+
+    ~VulkanTestHelper() override {}
+
+    void releaseImage() override {
+        if (VK_NULL_HANDLE == fDevice) {
+            return;
+        }
+        if (fImage != VK_NULL_HANDLE) {
+            SkASSERT(fVkDestroyImage);
+            fVkDestroyImage(fDevice, fImage, nullptr);
+            fImage = VK_NULL_HANDLE;
+        }
+
+        if (fMemory != VK_NULL_HANDLE) {
+            SkASSERT(fVkFreeMemory);
+            fVkFreeMemory(fDevice, fMemory, nullptr);
+            fMemory = VK_NULL_HANDLE;
+        }
+    }
+    void cleanup() override {
+        this->releaseImage();
+
+        fGrContext.reset();
+        fBackendContext.reset();
+
+        fInst = VK_NULL_HANDLE;
+        fPhysDev = VK_NULL_HANDLE;
+        fDevice = VK_NULL_HANDLE;
+    }
+
+    bool init(skiatest::Reporter* reporter);
+
+    void doClientSync() override {
+        if (!fGrContext) {
+            return;
+        }
+
+        fGrContext->contextPriv().getGpu()->testingOnly_flushGpuAndSync();
+    }
+
+    bool checkOptimalHardwareBuffer(skiatest::Reporter* reporter);
+
+    sk_sp<SkImage> importHardwareBufferForRead(skiatest::Reporter* reporter,
+                                               AHardwareBuffer* buffer) override;
+
+    sk_sp<SkSurface> importHardwareBufferForWrite(skiatest::Reporter* reporter,
+                                                  AHardwareBuffer* buffer) override;
+
+    GrContext* grContext() override { return fGrContext.get(); }
+
+private:
+    bool importHardwareBuffer(skiatest::Reporter* reporter, AHardwareBuffer* buffer, bool forWrite,
+                              GrVkImageInfo* outImageInfo);
+
+    DECLARE_VK_PROC(EnumerateInstanceVersion);
+    DECLARE_VK_PROC(CreateInstance);
+    DECLARE_VK_PROC(DestroyInstance);
+    DECLARE_VK_PROC(EnumeratePhysicalDevices);
+    DECLARE_VK_PROC(GetPhysicalDeviceProperties);
+    DECLARE_VK_PROC(GetPhysicalDeviceMemoryProperties2);
+    DECLARE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties);
+    DECLARE_VK_PROC(GetPhysicalDeviceFeatures);
+    DECLARE_VK_PROC(CreateDevice);
+    DECLARE_VK_PROC(GetDeviceQueue);
+    DECLARE_VK_PROC(DeviceWaitIdle);
+    DECLARE_VK_PROC(DestroyDevice);
+    DECLARE_VK_PROC(GetPhysicalDeviceImageFormatProperties2);
+    DECLARE_VK_PROC(CreateImage);
+    DECLARE_VK_PROC(GetImageMemoryRequirements2);
+    DECLARE_VK_PROC(GetAndroidHardwareBufferPropertiesANDROID);
+    DECLARE_VK_PROC(AllocateMemory);
+    DECLARE_VK_PROC(BindImageMemory2);
+    DECLARE_VK_PROC(DestroyImage);
+    DECLARE_VK_PROC(FreeMemory);
+
+    VkInstance fInst = VK_NULL_HANDLE;
+    VkPhysicalDevice fPhysDev = VK_NULL_HANDLE;
+    VkDevice fDevice = VK_NULL_HANDLE;
+
+    VkImage fImage = VK_NULL_HANDLE;
+    VkDeviceMemory fMemory = VK_NULL_HANDLE;
+
+    sk_sp<GrVkBackendContext> fBackendContext;
+    sk_sp<GrContext> fGrContext;
+};
+
+// Creates a Vulkan 1.1 instance and device with the extensions required to
+// import AHardwareBuffers, then wraps them in a GrVkBackendContext/GrContext.
+// Returns false when the platform cannot support the test (missing loader,
+// version, device, or extensions); reports via ERRORF only for unexpected
+// failures. Handles created before a failure are destroyed before returning.
+bool VulkanTestHelper::init(skiatest::Reporter* reporter) {
+    PFN_vkGetInstanceProcAddr instProc;
+    PFN_vkGetDeviceProcAddr devProc;
+    if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
+        return false;
+    }
+
+    // Resolve through the device when one is given, otherwise the instance.
+    auto getProc = [&instProc, &devProc](const char* proc_name,
+                                         VkInstance instance, VkDevice device) {
+        if (device != VK_NULL_HANDLE) {
+            return devProc(device, proc_name);
+        }
+        return instProc(instance, proc_name);
+    };
+
+    VkResult err;
+
+    ACQUIRE_VK_PROC(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
+    uint32_t instanceVersion = 0;
+    if (fVkEnumerateInstanceVersion) {
+        err = fVkEnumerateInstanceVersion(&instanceVersion);
+        if (err) {
+            ERRORF(reporter, "failed to enumerate instance version. Err: %d\n", err);
+            return false;
+        }
+    }
+    // The AHardwareBuffer external-memory extensions require Vulkan 1.1.
+    if (instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
+        return false;
+    }
+
+    const VkApplicationInfo app_info = {
+        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
+        nullptr,                            // pNext
+        "vkHWBTest",                        // pApplicationName
+        0,                                  // applicationVersion
+        "vkHWBTest",                        // pEngineName
+        0,                                  // engineVersion
+        instanceVersion,                    // apiVersion
+    };
+
+    GrVkExtensions extensions(getProc);
+    extensions.initInstance(instanceVersion);
+
+    SkTArray<const char*> instanceLayerNames;
+    SkTArray<const char*> instanceExtensionNames;
+    uint32_t extensionFlags = 0;
+#ifdef SK_ENABLE_VK_LAYERS
+    for (size_t i = 0; i < SK_ARRAY_COUNT(kMyDebugLayerNames); ++i) {
+        if (extensions.hasInstanceLayer(kMyDebugLayerNames[i])) {
+            instanceLayerNames.push_back(kMyDebugLayerNames[i]);
+        }
+    }
+    if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
+        instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
+        extensionFlags |= kEXT_debug_report_GrVkExtensionFlag;
+    }
+#endif
+
+    const VkInstanceCreateInfo instance_create = {
+        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
+        nullptr,                                   // pNext
+        0,                                         // flags
+        &app_info,                                 // pApplicationInfo
+        (uint32_t) instanceLayerNames.count(),     // enabledLayerNameCount
+        instanceLayerNames.begin(),                // ppEnabledLayerNames
+        (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
+        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
+    };
+
+    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
+    err = fVkCreateInstance(&instance_create, nullptr, &fInst);
+    if (err < 0) {
+        ERRORF(reporter, "vkCreateInstance failed: %d\n", err);
+        return false;
+    }
+
+    ACQUIRE_VK_PROC(DestroyInstance, fInst, VK_NULL_HANDLE);
+    ACQUIRE_INST_VK_PROC(EnumeratePhysicalDevices);
+    ACQUIRE_INST_VK_PROC(GetPhysicalDeviceProperties);
+    ACQUIRE_INST_VK_PROC(GetPhysicalDeviceMemoryProperties2);
+    ACQUIRE_INST_VK_PROC(GetPhysicalDeviceQueueFamilyProperties);
+    ACQUIRE_INST_VK_PROC(GetPhysicalDeviceFeatures);
+    ACQUIRE_INST_VK_PROC(CreateDevice);
+    ACQUIRE_INST_VK_PROC(GetDeviceQueue);
+    ACQUIRE_INST_VK_PROC(DeviceWaitIdle);
+    ACQUIRE_INST_VK_PROC(DestroyDevice);
+    ACQUIRE_INST_VK_PROC(GetPhysicalDeviceImageFormatProperties2);
+
+    uint32_t gpuCount;
+    err = fVkEnumeratePhysicalDevices(fInst, &gpuCount, nullptr);
+    if (err) {
+        ERRORF(reporter, "vkEnumeratePhysicalDevices failed: %d\n", err);
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+    if (!gpuCount) {
+        // Having no physical devices is not an error or a test failure.
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+    // Just returning the first physical device instead of getting the whole array.
+    // TODO: find best match for our needs
+    gpuCount = 1;
+    err = fVkEnumeratePhysicalDevices(fInst, &gpuCount, &fPhysDev);
+    if (err) {
+        ERRORF(reporter, "vkEnumeratePhysicalDevices failed: %d\n", err);
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+
+    // query to get the initial queue props size
+    uint32_t queueCount;
+    fVkGetPhysicalDeviceQueueFamilyProperties(fPhysDev, &queueCount, nullptr);
+    if (!queueCount) {
+        ERRORF(reporter, "vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+
+    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
+    // now get the actual queue props
+    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
+
+    fVkGetPhysicalDeviceQueueFamilyProperties(fPhysDev, &queueCount, queueProps);
+
+    // iterate to find the graphics queue
+    uint32_t graphicsQueueIndex = queueCount;
+    for (uint32_t i = 0; i < queueCount; i++) {
+        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+            graphicsQueueIndex = i;
+            break;
+        }
+    }
+    if (graphicsQueueIndex == queueCount) {
+        ERRORF(reporter, "Could not find any supported graphics queues.\n");
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+
+    VkPhysicalDeviceProperties physDevProperties;
+    fVkGetPhysicalDeviceProperties(fPhysDev, &physDevProperties);
+    int physDevVersion = physDevProperties.apiVersion;
+
+    extensions.initDevice(physDevVersion, fInst, fPhysDev);
+
+    SkTArray<const char*> deviceLayerNames;
+    SkTArray<const char*> deviceExtensionNames;
+#ifdef SK_ENABLE_VK_LAYERS
+    for (size_t i = 0; i < SK_ARRAY_COUNT(kMyDebugLayerNames); ++i) {
+        if (extensions.hasDeviceLayer(kMyDebugLayerNames[i])) {
+            deviceLayerNames.push_back(kMyDebugLayerNames[i]);
+        }
+    }
+#endif
+
+    // The YCbCr-conversion and AHardwareBuffer external-memory device
+    // extensions are hard requirements for importing AHardwareBuffers.
+    if (extensions.hasDeviceExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME)) {
+        deviceExtensionNames.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
+    } else {
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+
+    if (extensions.hasDeviceExtension(
+            VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) {
+        deviceExtensionNames.push_back(
+                VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
+    } else {
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+
+    if (extensions.hasDeviceExtension(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME)) {
+        deviceExtensionNames.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
+    } else {
+        // NOTE(review): a missing VK_EXT_queue_family_foreign is only warned
+        // about rather than treated as fatal -- confirm whether the import
+        // path can work without it before removing the disabled bail-out.
+        SkDebugf("We don't have the extension for VK_EXT_QUEUE_FAMILY_FOREIGN\n");
+        //fVkDestroyInstance(fInst, nullptr);
+        //return false;
+    }
+
+    // query to get the physical device properties
+    VkPhysicalDeviceFeatures deviceFeatures;
+    fVkGetPhysicalDeviceFeatures(fPhysDev, &deviceFeatures);
+    // this looks like it would slow things down,
+    // and we can't depend on it on all platforms
+    deviceFeatures.robustBufferAccess = VK_FALSE;
+
+    uint32_t featureFlags = 0;
+    if (deviceFeatures.geometryShader) {
+        featureFlags |= kGeometryShader_GrVkFeatureFlag;
+    }
+    if (deviceFeatures.dualSrcBlend) {
+        featureFlags |= kDualSrcBlend_GrVkFeatureFlag;
+    }
+    if (deviceFeatures.sampleRateShading) {
+        featureFlags |= kSampleRateShading_GrVkFeatureFlag;
+    }
+
+    float queuePriorities[1] = { 0.0 };
+    // Here we assume no need for swapchain queue
+    // If one is needed, the client will need its own setup code
+    const VkDeviceQueueCreateInfo queueInfo[1] = {
+        {
+            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+            nullptr,                                    // pNext
+            0,                                          // VkDeviceQueueCreateFlags
+            graphicsQueueIndex,                         // queueFamilyIndex
+            1,                                          // queueCount
+            queuePriorities,                            // pQueuePriorities
+        }
+    };
+    uint32_t queueInfoCount = 1;
+
+    const VkDeviceCreateInfo deviceInfo = {
+        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,    // sType
+        nullptr,                                 // pNext
+        0,                                       // VkDeviceCreateFlags
+        queueInfoCount,                          // queueCreateInfoCount
+        queueInfo,                               // pQueueCreateInfos
+        (uint32_t) deviceLayerNames.count(),     // layerCount
+        deviceLayerNames.begin(),                // ppEnabledLayerNames
+        (uint32_t) deviceExtensionNames.count(), // extensionCount
+        deviceExtensionNames.begin(),            // ppEnabledExtensionNames
+        &deviceFeatures                          // ppEnabledFeatures
+    };
+
+    err = fVkCreateDevice(fPhysDev, &deviceInfo, nullptr, &fDevice);
+    if (err) {
+        ERRORF(reporter, "CreateDevice failed: %d\n", err);
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+
+    ACQUIRE_DEVICE_VK_PROC(CreateImage);
+    ACQUIRE_DEVICE_VK_PROC(GetImageMemoryRequirements2);
+    ACQUIRE_DEVICE_VK_PROC(GetAndroidHardwareBufferPropertiesANDROID);
+    ACQUIRE_DEVICE_VK_PROC(AllocateMemory);
+    ACQUIRE_DEVICE_VK_PROC(BindImageMemory2);
+    ACQUIRE_DEVICE_VK_PROC(DestroyImage);
+    ACQUIRE_DEVICE_VK_PROC(FreeMemory);
+
+    VkQueue queue;
+    fVkGetDeviceQueue(fDevice, graphicsQueueIndex, 0, &queue);
+
+    // Setting up actual skia things now
+    auto interface =
+            sk_make_sp<GrVkInterface>(getProc, fInst, fDevice, extensionFlags);
+    if (!interface->validate(extensionFlags)) {
+        ERRORF(reporter, "Vulkan interface validation failed\n");
+        fVkDeviceWaitIdle(fDevice);
+        fVkDestroyDevice(fDevice, nullptr);
+        fVkDestroyInstance(fInst, nullptr);
+        return false;
+    }
+
+    fBackendContext.reset(new GrVkBackendContext());
+    fBackendContext->fInstance = fInst;
+    fBackendContext->fPhysicalDevice = fPhysDev;
+    fBackendContext->fDevice = fDevice;
+    fBackendContext->fQueue = queue;
+    fBackendContext->fGraphicsQueueIndex = graphicsQueueIndex;
+    fBackendContext->fMinAPIVersion = instanceVersion;
+    fBackendContext->fExtensions = extensionFlags;
+    fBackendContext->fFeatures = featureFlags;
+    fBackendContext->fInterface.reset(interface.release());
+    fBackendContext->fOwnsInstanceAndDevice = true;
+
+    fGrContext = GrContext::MakeVulkan(fBackendContext);
+    REPORTER_ASSERT(reporter, fGrContext.get());
+
+    return true;
+}
+
+// Verifies that the physical device can import an optimally-tiled
+// VK_FORMAT_R8G8B8A8_UNORM image of at least DEV_W x DEV_H from an
+// AHardwareBuffer with sampled/transfer usage, that such imports are
+// importable and dedicated-only, and that the reported AHardwareBuffer usage
+// includes GPU sampling.
+bool VulkanTestHelper::checkOptimalHardwareBuffer(skiatest::Reporter* reporter) {
+    VkResult err;
+
+    VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo;
+    externalImageFormatInfo.sType =
+            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
+    externalImageFormatInfo.pNext = nullptr;
+    externalImageFormatInfo.handleType =
+            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
+
+    // We will create the hardware buffer with gpu sampled so these usages should all be valid
+    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
+                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+    VkPhysicalDeviceImageFormatInfo2 imageFormatInfo;
+    imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
+    imageFormatInfo.pNext = &externalImageFormatInfo;
+    imageFormatInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+    imageFormatInfo.type = VK_IMAGE_TYPE_2D;
+    imageFormatInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+    imageFormatInfo.usage = usageFlags;
+    imageFormatInfo.flags = 0;
+
+    // Output struct: the driver reports the AHardwareBuffer usage bits that
+    // correspond to the requested Vulkan usage.
+    VkAndroidHardwareBufferUsageANDROID hwbUsage;
+    hwbUsage.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID;
+    hwbUsage.pNext = nullptr;
+
+    VkExternalImageFormatProperties externalImgFormatProps;
+    externalImgFormatProps.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
+    externalImgFormatProps.pNext = &hwbUsage;
+
+    VkImageFormatProperties2 imgFormProps;
+    imgFormProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+    imgFormProps.pNext = &externalImgFormatProps;
+
+    err = fVkGetPhysicalDeviceImageFormatProperties2(fPhysDev, &imageFormatInfo,
+                                                     &imgFormProps);
+    if (VK_SUCCESS != err) {
+        ERRORF(reporter, "vkGetPhysicalDeviceImageFormatProperties2 failed, err: %d", err);
+        return false;
+    }
+
+    const VkImageFormatProperties& imageFormatProperties = imgFormProps.imageFormatProperties;
+    REPORTER_ASSERT(reporter, DEV_W <= imageFormatProperties.maxExtent.width);
+    REPORTER_ASSERT(reporter, DEV_H <= imageFormatProperties.maxExtent.height);
+
+    const VkExternalMemoryProperties& externalImageFormatProps =
+            externalImgFormatProps.externalMemoryProperties;
+    REPORTER_ASSERT(reporter, SkToBool(VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT &
+                                       externalImageFormatProps.externalMemoryFeatures));
+    REPORTER_ASSERT(reporter, SkToBool(VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT &
+                                       externalImageFormatProps.externalMemoryFeatures));
+
+    REPORTER_ASSERT(reporter, SkToBool(AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE &
+                                       hwbUsage.androidHardwareBufferUsage));
+
+    return true;
+}
+
+// Imports the AHardwareBuffer into this helper's VkDevice: queries the
+// buffer's Vulkan properties, creates a matching VkImage (stored in fImage),
+// makes a dedicated memory allocation bound to the buffer (fMemory), and
+// fills in outImageInfo for wrapping by Skia. When forWrite is true the image
+// additionally gets color-attachment usage (and the format's renderability is
+// asserted) so it can back an SkSurface.
+bool VulkanTestHelper::importHardwareBuffer(skiatest::Reporter* reporter,
+                                            AHardwareBuffer* buffer,
+                                            bool forWrite,
+                                            GrVkImageInfo* outImageInfo) {
+    VkResult err;
+
+    VkAndroidHardwareBufferFormatPropertiesANDROID hwbFormatProps;
+    hwbFormatProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
+    hwbFormatProps.pNext = nullptr;
+
+    VkAndroidHardwareBufferPropertiesANDROID hwbProps;
+    hwbProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
+    hwbProps.pNext = &hwbFormatProps;
+
+    err = fVkGetAndroidHardwareBufferPropertiesANDROID(fDevice, buffer, &hwbProps);
+    if (VK_SUCCESS != err) {
+        ERRORF(reporter, "GetAndroidHardwareBufferPropertiesANDROID failed, err: %d", err);
+        return false;
+    }
+
+    // The buffer is allocated as R8G8B8A8 and must be sampleable/copyable;
+    // renderability is only required for the write path.
+    REPORTER_ASSERT(reporter, VK_FORMAT_R8G8B8A8_UNORM == hwbFormatProps.format);
+    REPORTER_ASSERT(reporter,
+                    SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT & hwbFormatProps.formatFeatures) &&
+                    SkToBool(VK_FORMAT_FEATURE_TRANSFER_SRC_BIT & hwbFormatProps.formatFeatures) &&
+                    SkToBool(VK_FORMAT_FEATURE_TRANSFER_DST_BIT & hwbFormatProps.formatFeatures));
+    if (forWrite) {
+        REPORTER_ASSERT(reporter,
+                SkToBool(VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT & hwbFormatProps.formatFeatures));
+    }
+
+    // An external format is only needed when the driver could not map the
+    // buffer to a core VkFormat.
+    bool useExternalFormat = VK_FORMAT_UNDEFINED == hwbFormatProps.format;
+    const VkExternalFormatANDROID externalFormatInfo {
+        VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID,             // sType
+        nullptr,                                               // pNext
+        useExternalFormat ? hwbFormatProps.externalFormat : 0, // externalFormat
+    };
+
+    const VkExternalMemoryImageCreateInfo externalMemoryImageInfo {
+        VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,                // sType
+        &externalFormatInfo,                                                // pNext
+        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, // handleTypes
+    };
+
+    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
+                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+    if (forWrite) {
+        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+    }
+
+    const VkImageCreateInfo imageCreateInfo = {
+        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
+        &externalMemoryImageInfo,                    // pNext
+        0,                                           // VkImageCreateFlags
+        VK_IMAGE_TYPE_2D,                            // VkImageType
+        hwbFormatProps.format,                       // VkFormat
+        { DEV_W, DEV_H, 1 },                         // VkExtent3D
+        1,                                           // mipLevels
+        1,                                           // arrayLayers
+        VK_SAMPLE_COUNT_1_BIT,                       // samples
+        VK_IMAGE_TILING_OPTIMAL,                     // VkImageTiling
+        usageFlags,                                  // VkImageUsageFlags
+        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
+        0,                                           // queueFamilyCount
+        nullptr,                                     // pQueueFamilyIndices
+        VK_IMAGE_LAYOUT_UNDEFINED,                   // initialLayout
+    };
+
+    err = fVkCreateImage(fDevice, &imageCreateInfo, nullptr, &fImage);
+    if (VK_SUCCESS != err) {
+        ERRORF(reporter, "Create Image failed, err: %d", err);
+        return false;
+    }
+
+    VkImageMemoryRequirementsInfo2 memReqsInfo;
+    memReqsInfo.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
+    memReqsInfo.pNext = nullptr;
+    memReqsInfo.image = fImage;
+
+    VkMemoryDedicatedRequirements dedicatedMemReqs;
+    dedicatedMemReqs.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
+    dedicatedMemReqs.pNext = nullptr;
+
+    VkMemoryRequirements2 memReqs;
+    memReqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
+    memReqs.pNext = &dedicatedMemReqs;
+
+    // AHardwareBuffer imports must use a dedicated allocation.
+    fVkGetImageMemoryRequirements2(fDevice, &memReqsInfo, &memReqs);
+    REPORTER_ASSERT(reporter, VK_TRUE == dedicatedMemReqs.requiresDedicatedAllocation);
+
+    VkPhysicalDeviceMemoryProperties2 phyDevMemProps;
+    phyDevMemProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
+    phyDevMemProps.pNext = nullptr;
+
+    // Pick a device-local memory type allowed by the buffer's memoryTypeBits.
+    uint32_t typeIndex = 0;
+    uint32_t heapIndex = 0;
+    bool foundHeap = false;
+    fVkGetPhysicalDeviceMemoryProperties2(fPhysDev, &phyDevMemProps);
+    uint32_t memTypeCnt = phyDevMemProps.memoryProperties.memoryTypeCount;
+    for (uint32_t i = 0; i < memTypeCnt && !foundHeap; ++i) {
+        if (hwbProps.memoryTypeBits & (1 << i)) {
+            const VkPhysicalDeviceMemoryProperties& pdmp = phyDevMemProps.memoryProperties;
+            uint32_t supportedFlags = pdmp.memoryTypes[i].propertyFlags &
+                    VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+            if (supportedFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
+                typeIndex = i;
+                heapIndex = pdmp.memoryTypes[i].heapIndex;
+                foundHeap = true;
+            }
+        }
+    }
+    if (!foundHeap) {
+        ERRORF(reporter, "Failed to find valid heap for imported memory");
+        return false;
+    }
+
+    VkImportAndroidHardwareBufferInfoANDROID hwbImportInfo;
+    hwbImportInfo.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
+    hwbImportInfo.pNext = nullptr;
+    hwbImportInfo.buffer = buffer;
+
+    VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
+    dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
+    dedicatedAllocInfo.pNext = &hwbImportInfo;
+    dedicatedAllocInfo.image = fImage;
+    dedicatedAllocInfo.buffer = VK_NULL_HANDLE;
+
+    VkMemoryAllocateInfo allocInfo = {
+        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
+        &dedicatedAllocInfo,                         // pNext
+        hwbProps.allocationSize,                     // allocationSize
+        typeIndex,                                   // memoryTypeIndex
+    };
+
+    err = fVkAllocateMemory(fDevice, &allocInfo, nullptr, &fMemory);
+    if (VK_SUCCESS != err) {
+        ERRORF(reporter, "AllocateMemory failed for imported buffer, err: %d", err);
+        return false;
+    }
+
+    VkBindImageMemoryInfo bindImageInfo;
+    bindImageInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
+    bindImageInfo.pNext = nullptr;
+    bindImageInfo.image = fImage;
+    bindImageInfo.memory = fMemory;
+    bindImageInfo.memoryOffset = 0;
+
+    err = fVkBindImageMemory2(fDevice, 1, &bindImageInfo);
+    if (VK_SUCCESS != err) {
+        ERRORF(reporter, "BindImageMemory failed for imported buffer, err: %d", err);
+        return false;
+    }
+
+    // The image starts on the external/foreign queue family with undefined
+    // layout; Skia will transition it when it first uses the texture.
+    outImageInfo->fImage = fImage;
+    outImageInfo->fAlloc = GrVkAlloc(fMemory, 0, hwbProps.allocationSize, 0);
+    outImageInfo->fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+    outImageInfo->fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+    outImageInfo->fFormat = VK_FORMAT_R8G8B8A8_UNORM;
+    outImageInfo->fLevelCount = 1;
+    outImageInfo->fInitialQueueFamily = VK_QUEUE_FAMILY_EXTERNAL;
+    outImageInfo->fCurrentQueueFamily = VK_QUEUE_FAMILY_EXTERNAL;
+    return true;
+}
+
+// Wraps the AHardwareBuffer as a read-only SkImage backed by an imported
+// VkImage. Returns nullptr (after reporting) on failure.
+sk_sp<SkImage> VulkanTestHelper::importHardwareBufferForRead(skiatest::Reporter* reporter,
+                                                             AHardwareBuffer* buffer) {
+    GrVkImageInfo vkInfo;
+    if (!this->importHardwareBuffer(reporter, buffer, false, &vkInfo)) {
+        return nullptr;
+    }
+
+    GrBackendTexture texture(DEV_W, DEV_H, vkInfo);
+    auto image = SkImage::MakeFromTexture(fGrContext.get(), texture, kTopLeft_GrSurfaceOrigin,
+                                          kRGBA_8888_SkColorType, kPremul_SkAlphaType, nullptr);
+    if (!image) {
+        ERRORF(reporter, "Failed to create wrapped Vulkan SkImage");
+    }
+    return image;
+}
+
+// Wraps the AHardwareBuffer as a renderable SkSurface backed by an imported
+// VkImage. Returns nullptr (after reporting) on failure.
+sk_sp<SkSurface> VulkanTestHelper::importHardwareBufferForWrite(skiatest::Reporter* reporter,
+                                                                AHardwareBuffer* buffer) {
+    GrVkImageInfo imageInfo;
+    // Pass forWrite=true so the imported image gets color-attachment usage and
+    // its renderability is checked (the previous code passed false here).
+    if (!this->importHardwareBuffer(reporter, buffer, true, &imageInfo)) {
+        return nullptr;
+    }
+
+    GrBackendTexture backendTex(DEV_W, DEV_H, imageInfo);
+
+    sk_sp<SkSurface> surface = SkSurface::MakeFromBackendTexture(fGrContext.get(),
+                                                                 backendTex,
+                                                                 kTopLeft_GrSurfaceOrigin,
+                                                                 1,
+                                                                 kRGBA_8888_SkColorType,
+                                                                 nullptr, nullptr);
+
+    if (!surface.get()) {
+        ERRORF(reporter, "Failed to create wrapped Vulkan SkSurface");
+        return nullptr;
+    }
+
+    return surface;
+}
+
+// Deterministic per-pixel source color: red encodes x, green encodes y, blue
+// is constant, and alpha is chosen from five values by (x+y) % 5.
+static SkPMColor get_src_color(int x, int y) {
+    SkASSERT(x >= 0 && x < DEV_W);
+    SkASSERT(y >= 0 && y < DEV_H);
+
+    U8CPU r = x;
+    U8CPU g = y;
+    U8CPU b = 0xc;
+
+    U8CPU a = 0xff;
+    switch ((x+y) % 5) {
+        case 0:
+            a = 0xff;
+            break;
+        case 1:
+            a = 0x80;
+            break;
+        case 2:
+            a = 0xCC;
+            break;
+        case 4:
+            a = 0x01;
+            break;
+        case 3:
+            a = 0x00;
+            break;
+    }
+    // NOTE(review): this override forces every pixel opaque, making the switch
+    // above dead code -- confirm whether varying alpha was meant to be enabled.
+    a = 0xff;
+    return SkPremultiplyARGBInline(a, r, g, b);
+}
+
+// Lazily builds (and caches in a function-local static) the DEV_W x DEV_H N32
+// bitmap holding the deterministic source pattern from get_src_color().
+static SkBitmap make_src_bitmap() {
+    static SkBitmap bmp;
+    if (bmp.isNull()) {
+        bmp.allocN32Pixels(DEV_W, DEV_H);
+        for (int row = 0; row < DEV_H; ++row) {
+            for (int col = 0; col < DEV_W; ++col) {
+                *bmp.getAddr32(col, row) = get_src_color(col, row);
+            }
+        }
+    }
+    return bmp;
+}
+
+// Compares dstBitmap to srcBitmap pixel by pixel. Logs every mismatching
+// pixel (it keeps scanning after the first failure) and returns false if any
+// pixel differs.
+static bool check_read(skiatest::Reporter* reporter, const SkBitmap& srcBitmap,
+                       const SkBitmap& dstBitmap) {
+    bool result = true;
+    for (int y = 0; y < DEV_H; ++y) {
+        for (int x = 0; x < DEV_W; ++x) {
+            const uint32_t srcPixel = *srcBitmap.getAddr32(x, y);
+            const uint32_t dstPixel = *dstBitmap.getAddr32(x, y);
+            if (srcPixel != dstPixel) {
+                ERRORF(reporter, "Expected readback pixel (%d, %d) value 0x%08x, got 0x%08x.",
+                       x, y,  srcPixel, dstPixel);
+                result = false;
+            }
+        }
+    }
+    return result;
+}
+
+// Releases per-test resources: runs cleanup() on each helper and drops the
+// AHardwareBuffer reference. All arguments may be null.
+static void cleanup_resources(BaseTestHelper* srcHelper, BaseTestHelper* dstHelper,
+                              AHardwareBuffer* buffer) {
+    BaseTestHelper* helpers[] = {srcHelper, dstHelper};
+    for (BaseTestHelper* helper : helpers) {
+        if (helper) {
+            helper->cleanup();
+        }
+    }
+    if (buffer) {
+        AHardwareBuffer_release(buffer);
+    }
+}
+
+// Backend used to write the source pattern into the AHardwareBuffer.
+enum class SrcType {
+    kCPU,
+    kEGL,
+    kVulkan,
+};
+
+// Backend used to import and read back the AHardwareBuffer.
+enum class DstType {
+    kEGL,
+    kVulkan,
+};
+
+// End-to-end AHardwareBuffer interop test: allocate a DEV_W x DEV_H buffer,
+// fill it via srcType (direct CPU writes, or drawing into a GPU surface that
+// wraps the buffer), then import it via dstType, draw it into a fresh
+// surface, read back, and compare against the expected source pattern.
+void run_test(skiatest::Reporter* reporter, const GrContextOptions& options,
+              SrcType srcType, DstType dstType) {
+    VulkanTestHelper vulkanHelper;
+    EGLTestHelper eglHelper(options);
+    AHardwareBuffer* buffer = nullptr;
+    // Only initialize the backends this src/dst combination actually uses; a
+    // failed init means the platform can't run the test, not a test failure.
+    if (SrcType::kVulkan == srcType || DstType::kVulkan == dstType) {
+        if (!vulkanHelper.init(reporter)) {
+            cleanup_resources(&vulkanHelper, &eglHelper, buffer);
+            return;
+        }
+        if (!vulkanHelper.checkOptimalHardwareBuffer(reporter)) {
+            cleanup_resources(&vulkanHelper, &eglHelper, buffer);
+            return;
+        }
+    }
+
+    if (SrcType::kEGL == srcType || DstType::kEGL == dstType) {
+        if (!eglHelper.init(reporter)) {
+            cleanup_resources(&vulkanHelper, &eglHelper, buffer);
+            return;
+        }
+    }
+
+    // srcHelper stays null for the CPU source path.
+    BaseTestHelper* srcHelper = nullptr;
+    BaseTestHelper* dstHelper = nullptr;
+    if (SrcType::kVulkan == srcType) {
+        srcHelper = &vulkanHelper;
+    } else if (SrcType::kEGL ==srcType) {
+        srcHelper = &eglHelper;
+    }
+
+    if (DstType::kVulkan == dstType) {
+        dstHelper = &vulkanHelper;
+    } else {
+        SkASSERT(DstType::kEGL == dstType);
+        dstHelper = &eglHelper;
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // Setup SkBitmaps
+    ///////////////////////////////////////////////////////////////////////////
+
+    SkBitmap srcBitmap = make_src_bitmap();
+    // NOTE(review): despite the names, dstBitmapEGL holds the source-side
+    // readback for whichever GPU backend wrote the buffer, and dstBitmapVk
+    // holds the destination-side readback -- consider renaming.
+    SkBitmap dstBitmapEGL;
+    dstBitmapEGL.allocN32Pixels(DEV_W, DEV_H);
+    SkBitmap dstBitmapVk;
+    dstBitmapVk.allocN32Pixels(DEV_W, DEV_H);
+
+    ///////////////////////////////////////////////////////////////////////////
+    // Setup AHardwareBuffer
+    ///////////////////////////////////////////////////////////////////////////
+
+    AHardwareBuffer_Desc hwbDesc;
+    hwbDesc.width = DEV_W;
+    hwbDesc.height = DEV_H;
+    hwbDesc.layers = 1;
+    // CPU sources need CPU-write access; GPU sources need color-output usage.
+    if (SrcType::kCPU == srcType) {
+        hwbDesc.usage = AHARDWAREBUFFER_USAGE_CPU_READ_NEVER |
+                        AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
+                        AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
+    } else {
+        hwbDesc.usage = AHARDWAREBUFFER_USAGE_CPU_READ_NEVER |
+                        AHARDWAREBUFFER_USAGE_CPU_WRITE_NEVER |
+                        AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
+                        AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
+    }
+    hwbDesc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
+    // The following three are not used in the allocate
+    hwbDesc.stride = 0;
+    hwbDesc.rfu0= 0;
+    hwbDesc.rfu1= 0;
+
+    if (int error = AHardwareBuffer_allocate(&hwbDesc, &buffer)) {
+        ERRORF(reporter, "Failed to allocated hardware buffer, error: %d", error);
+        cleanup_resources(srcHelper, dstHelper, buffer);
+        return;
+    }
+
+    if (SrcType::kCPU == srcType) {
+        // Get actual desc for allocated buffer so we know the stride for uploading cpu data.
+        AHardwareBuffer_describe(buffer, &hwbDesc);
+
+        uint32_t* bufferAddr;
+        if (AHardwareBuffer_lock(buffer, AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN, -1, nullptr,
+                                 reinterpret_cast<void**>(&bufferAddr))) {
+            ERRORF(reporter, "Failed to lock hardware buffer");
+            cleanup_resources(srcHelper, dstHelper, buffer);
+            return;
+        }
+
+        // Copy row by row; hwbDesc.stride is in pixels, so advance the
+        // uint32_t pointer by the stride directly.
+        int bbp = srcBitmap.bytesPerPixel();
+        uint32_t* src = (uint32_t*)srcBitmap.getPixels();
+        uint32_t* dst = bufferAddr;
+        for (int y = 0; y < DEV_H; ++y) {
+            memcpy(dst, src, DEV_W * bbp);
+            src += DEV_W;
+            dst += hwbDesc.stride;
+        }
+
+        // Sanity-check the CPU upload before involving any GPU backend.
+        for (int y = 0; y < DEV_H; ++y) {
+            for (int x = 0; x < DEV_W; ++x) {
+                const uint32_t srcPixel = *srcBitmap.getAddr32(x, y);
+                uint32_t dstPixel = bufferAddr[y * hwbDesc.stride + x];
+                if (srcPixel != dstPixel) {
+                    ERRORF(reporter, "CPU HWB Expected readpix (%d, %d) value 0x%08x, got 0x%08x.",
+                           x, y, srcPixel, dstPixel);
+                }
+            }
+        }
+
+        AHardwareBuffer_unlock(buffer, nullptr);
+
+    } else {
+        // GPU source: draw the pattern into a surface that wraps the buffer,
+        // then read it back to confirm the write landed.
+        sk_sp<SkSurface> surface = srcHelper->importHardwareBufferForWrite(reporter, buffer);
+
+        if (!surface) {
+            cleanup_resources(srcHelper, dstHelper, buffer);
+            return;
+        }
+
+        sk_sp<SkImage> srcBmpImage = SkImage::MakeFromBitmap(srcBitmap);
+        surface->getCanvas()->drawImage(srcBmpImage, 0, 0);
+
+        bool readResult = surface->readPixels(dstBitmapEGL, 0, 0);
+        if (!readResult) {
+            ERRORF(reporter, "Read Pixels on surface failed");
+            surface.reset();
+            cleanup_resources(srcHelper, dstHelper, buffer);
+            return;
+        }
+
+        REPORTER_ASSERT(reporter, check_read(reporter, srcBitmap, dstBitmapEGL));
+
+        ///////////////////////////////////////////////////////////////////////////
+        // Cleanup GL/EGL and add syncs
+        ///////////////////////////////////////////////////////////////////////////
+
+        // Make sure the source backend is fully done with the buffer before
+        // the destination backend imports it.
+        surface.reset();
+        srcHelper->doClientSync();
+        srcHelper->releaseImage();
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // Import the HWB into backend and draw it to a surface
+    ///////////////////////////////////////////////////////////////////////////
+
+    sk_sp<SkImage> wrappedImage = dstHelper->importHardwareBufferForRead(reporter, buffer);
+    if (!wrappedImage) {
+        cleanup_resources(srcHelper, dstHelper, buffer);
+        return;
+    }
+
+    GrContext* grContext = dstHelper->grContext();
+
+    // Make SkSurface to render wrapped HWB into.
+    SkImageInfo imageInfo = SkImageInfo::Make(DEV_W, DEV_H, kRGBA_8888_SkColorType,
+                                              kPremul_SkAlphaType, nullptr);
+
+    sk_sp<SkSurface> dstSurf = SkSurface::MakeRenderTarget(grContext,
+                                                           SkBudgeted::kNo, imageInfo, 0,
+                                                           kTopLeft_GrSurfaceOrigin,
+                                                           nullptr, false);
+    if (!dstSurf.get()) {
+        ERRORF(reporter, "Failed to create destination SkSurface");
+        wrappedImage.reset();
+        cleanup_resources(srcHelper, dstHelper, buffer);
+        return;
+    }
+
+    dstSurf->getCanvas()->drawImage(wrappedImage, 0, 0);
+
+    bool readResult = dstSurf->readPixels(dstBitmapVk, 0, 0);
+    if (!readResult) {
+        ERRORF(reporter, "Read Pixels failed");
+        wrappedImage.reset();
+        dstHelper->doClientSync();
+        cleanup_resources(srcHelper, dstHelper, buffer);
+        return;
+    }
+
+    REPORTER_ASSERT(reporter, check_read(reporter, srcBitmap, dstBitmapVk));
+
+    // Drop the wrapped image and sync before tearing anything down.
+    wrappedImage.reset();
+    dstHelper->doClientSync();
+    cleanup_resources(srcHelper, dstHelper, buffer);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_CPU_Vulkan, reporter, options) {
+    // Source: CPU-written AHardwareBuffer; destination: Vulkan.
+    run_test(reporter, options, SrcType::kCPU, DstType::kVulkan);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_EGL_Vulkan, reporter, options) {
+    // Source: EGL; destination: Vulkan.
+    run_test(reporter, options, SrcType::kEGL, DstType::kVulkan);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_Vulkan_Vulkan, reporter, options) {
+    // Source: Vulkan; destination: Vulkan. (Previously passed SrcType::kEGL --
+    // a copy/paste slip that left the Vulkan source path untested here.)
+    run_test(reporter, options, SrcType::kVulkan, DstType::kVulkan);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_CPU_EGL, reporter, options) {
+    // Source: CPU-written AHardwareBuffer; destination: EGL.
+    run_test(reporter, options, SrcType::kCPU, DstType::kEGL);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_EGL_EGL, reporter, options) {
+    // Source: EGL; destination: EGL.
+    run_test(reporter, options, SrcType::kEGL, DstType::kEGL);
+}
+
+DEF_GPUTEST(VulkanHardwareBuffer_Vulkan_EGL, reporter, options) {
+    // Source: Vulkan; destination: EGL. (Previously passed SrcType::kEGL -- a
+    // copy/paste slip that made this a duplicate of the EGL_EGL test.)
+    run_test(reporter, options, SrcType::kVulkan, DstType::kEGL);
+}
+
+#endif
+#endif
+
diff --git a/tools/skqp/gn_to_bp.py b/tools/skqp/gn_to_bp.py
index f134e7c..822018c 100644
--- a/tools/skqp/gn_to_bp.py
+++ b/tools/skqp/gn_to_bp.py
@@ -37,6 +37,7 @@
         $cflags
         "-Wno-unused-parameter",
         "-Wno-unused-variable",
+        "-DSKQP_BUILD_HARDWAREBUFFER_TEST",
     ],
 
     cppflags:[