diff -up chromium-81.0.4044.113/media/gpu/vaapi/vaapi_video_decode_accelerator.cc.vaapi-intel-fix chromium-81.0.4044.113/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
--- chromium-81.0.4044.113/media/gpu/vaapi/vaapi_video_decode_accelerator.cc.vaapi-intel-fix	2020-04-15 19:25:53.000000000 -0400
+++ chromium-81.0.4044.113/media/gpu/vaapi/vaapi_video_decode_accelerator.cc	2020-04-16 14:41:41.303904954 -0400
@@ -64,6 +64,7 @@ void ReportToUMA(VAVDADecoderFailure fai
                             VAVDA_DECODER_FAILURES_MAX + 1);
 }
 
+#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
 // Returns true if the CPU is an Intel Gemini Lake or later (including Kaby
 // Lake) Cpu platform id's are referenced from the following file in kernel
 // source arch/x86/include/asm/intel-family.h
@@ -76,6 +77,7 @@ bool IsGeminiLakeOrLater() {
       cpuid.model() >= kGeminiLakeModelId;
   return is_geminilake_or_later;
 }
+#endif
 
 }  // namespace
 
@@ -1171,6 +1173,8 @@ VaapiVideoDecodeAccelerator::DecideBuffe
   if (output_mode_ == VideoDecodeAccelerator::Config::OutputMode::IMPORT)
     return BufferAllocationMode::kNormal;
 
+#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
+  // Restrict this to Chrome OS and Android only, as it causes problems with some Intel Linux drivers.
   // On Gemini Lake, Kaby Lake and later we can pass to libva the client's
   // PictureBuffers to decode onto, which skips the use of the Vpp unit and its
   // associated format reconciliation copy, avoiding all internal buffer
@@ -1187,6 +1191,7 @@ VaapiVideoDecodeAccelerator::DecideBuffe
     num_extra_pics_ = 3;
     return BufferAllocationMode::kNone;
   }
+#endif
 
   // If we're here, we have to use the Vpp unit and allocate buffers for
   // |decoder_|; usually we'd have to allocate the |decoder_|s
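
For reference, below is a minimal standalone sketch (not part of the patch) of the kind of platform check the guarded hunks rely on: IsGeminiLakeOrLater() compares the CPUID model id against Gemini Lake, with model ids taken from arch/x86/include/asm/intel-family.h. The sketch reads CPUID directly instead of using Chromium's base::CPU, and the family/model decoding, the kPentiumAndLaterFamily constant, and the 0x7A model id are assumptions based on that kernel header rather than on the hunks shown here.

// Standalone sketch only -- mirrors the check that IsGeminiLakeOrLater()
// performs, using raw CPUID instead of Chromium's base::CPU.
#include <cpuid.h>
#include <cstdio>

namespace {

// Assumed values, per arch/x86/include/asm/intel-family.h: family 6 covers
// modern Intel cores; 0x7A is INTEL_FAM6_ATOM_GOLDMONT_PLUS (Gemini Lake).
constexpr unsigned int kPentiumAndLaterFamily = 0x06;
constexpr unsigned int kGeminiLakeModelId = 0x7A;

bool IsGeminiLakeOrLater() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return false;
  const unsigned int family = (eax >> 8) & 0xF;
  // For family 6 the extended model bits (EAX[19:16]) are prepended to the
  // base model bits (EAX[7:4]).
  const unsigned int model = (((eax >> 16) & 0xF) << 4) | ((eax >> 4) & 0xF);
  return family == kPentiumAndLaterFamily && model >= kGeminiLakeModelId;
}

}  // namespace

int main() {
  std::printf("Gemini Lake or later: %s\n",
              IsGeminiLakeOrLater() ? "yes" : "no");
  return 0;
}

With the #if guards in place, non-Chrome-OS Linux builds never take the BufferAllocationMode::kNone shortcut and instead fall through to the Vpp-based allocation path that follows the last hunk.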