Dataset columns:
  text                string, lengths 6 to 13.6M
  id                  string, lengths 13 to 176
  metadata            dict
  __index_level_0__   int64, 0 to 1.69k
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/flow/frame_timings.h" #include <memory> #include <string> #include "flutter/common/settings.h" #include "flutter/fml/logging.h" #include "flutter/fml/time/time_point.h" namespace flutter { namespace { const char* StateToString(FrameTimingsRecorder::State state) { #ifndef NDEBUG switch (state) { case FrameTimingsRecorder::State::kUninitialized: return "kUninitialized"; case FrameTimingsRecorder::State::kVsync: return "kVsync"; case FrameTimingsRecorder::State::kBuildStart: return "kBuildStart"; case FrameTimingsRecorder::State::kBuildEnd: return "kBuildEnd"; case FrameTimingsRecorder::State::kRasterStart: return "kRasterStart"; case FrameTimingsRecorder::State::kRasterEnd: return "kRasterEnd"; }; FML_UNREACHABLE(); #endif return ""; } } // namespace std::atomic<uint64_t> FrameTimingsRecorder::frame_number_gen_ = {1}; FrameTimingsRecorder::FrameTimingsRecorder() : frame_number_(frame_number_gen_++), frame_number_trace_arg_val_(std::to_string(frame_number_)) {} FrameTimingsRecorder::FrameTimingsRecorder(uint64_t frame_number) : frame_number_(frame_number), frame_number_trace_arg_val_(std::to_string(frame_number_)) {} FrameTimingsRecorder::~FrameTimingsRecorder() = default; fml::TimePoint FrameTimingsRecorder::GetVsyncStartTime() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kVsync); return vsync_start_; } fml::TimePoint FrameTimingsRecorder::GetVsyncTargetTime() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kVsync); return vsync_target_; } fml::TimePoint FrameTimingsRecorder::GetBuildStartTime() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kBuildStart); return build_start_; } fml::TimePoint FrameTimingsRecorder::GetBuildEndTime() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kBuildEnd); return build_end_; } fml::TimePoint FrameTimingsRecorder::GetRasterStartTime() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kRasterStart); return raster_start_; } fml::TimePoint FrameTimingsRecorder::GetRasterEndTime() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kRasterEnd); return raster_end_; } fml::TimePoint FrameTimingsRecorder::GetRasterEndWallTime() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kRasterEnd); return raster_end_wall_time_; } fml::TimeDelta FrameTimingsRecorder::GetBuildDuration() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kBuildEnd); return build_end_ - build_start_; } /// Count of the layer cache entries size_t FrameTimingsRecorder::GetLayerCacheCount() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kRasterEnd); return layer_cache_count_; } /// Total bytes in all layer cache entries size_t FrameTimingsRecorder::GetLayerCacheBytes() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kRasterEnd); return layer_cache_bytes_; } /// Count of the picture cache entries size_t FrameTimingsRecorder::GetPictureCacheCount() const { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ >= State::kRasterEnd); return picture_cache_count_; } /// Total bytes in all picture cache entries size_t FrameTimingsRecorder::GetPictureCacheBytes() const { std::scoped_lock state_lock(state_mutex_); 
FML_DCHECK(state_ >= State::kRasterEnd); return picture_cache_bytes_; } void FrameTimingsRecorder::RecordVsync(fml::TimePoint vsync_start, fml::TimePoint vsync_target) { fml::Status status = RecordVsyncImpl(vsync_start, vsync_target); FML_DCHECK(status.ok()); (void)status; } void FrameTimingsRecorder::RecordBuildStart(fml::TimePoint build_start) { fml::Status status = RecordBuildStartImpl(build_start); FML_DCHECK(status.ok()); (void)status; } void FrameTimingsRecorder::RecordBuildEnd(fml::TimePoint build_end) { fml::Status status = RecordBuildEndImpl(build_end); FML_DCHECK(status.ok()); (void)status; } void FrameTimingsRecorder::RecordRasterStart(fml::TimePoint raster_start) { fml::Status status = RecordRasterStartImpl(raster_start); FML_DCHECK(status.ok()); (void)status; } fml::Status FrameTimingsRecorder::RecordVsyncImpl(fml::TimePoint vsync_start, fml::TimePoint vsync_target) { std::scoped_lock state_lock(state_mutex_); if (state_ != State::kUninitialized) { return fml::Status(fml::StatusCode::kFailedPrecondition, "Check failed: state_ == State::kUninitialized."); } state_ = State::kVsync; vsync_start_ = vsync_start; vsync_target_ = vsync_target; return fml::Status(); } fml::Status FrameTimingsRecorder::RecordBuildStartImpl( fml::TimePoint build_start) { std::scoped_lock state_lock(state_mutex_); if (state_ != State::kVsync) { return fml::Status(fml::StatusCode::kFailedPrecondition, "Check failed: state_ == State::kVsync."); } state_ = State::kBuildStart; build_start_ = build_start; return fml::Status(); } fml::Status FrameTimingsRecorder::RecordBuildEndImpl(fml::TimePoint build_end) { std::scoped_lock state_lock(state_mutex_); if (state_ != State::kBuildStart) { return fml::Status(fml::StatusCode::kFailedPrecondition, "Check failed: state_ == State::kBuildStart."); } state_ = State::kBuildEnd; build_end_ = build_end; return fml::Status(); } fml::Status FrameTimingsRecorder::RecordRasterStartImpl( fml::TimePoint raster_start) { std::scoped_lock state_lock(state_mutex_); if (state_ != State::kBuildEnd) { return fml::Status(fml::StatusCode::kFailedPrecondition, "Check failed: state_ == State::kBuildEnd."); } state_ = State::kRasterStart; raster_start_ = raster_start; return fml::Status(); } FrameTiming FrameTimingsRecorder::RecordRasterEnd(const RasterCache* cache) { std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ == State::kRasterStart); state_ = State::kRasterEnd; raster_end_ = fml::TimePoint::Now(); raster_end_wall_time_ = fml::TimePoint::CurrentWallTime(); if (cache) { const RasterCacheMetrics& layer_metrics = cache->layer_metrics(); const RasterCacheMetrics& picture_metrics = cache->picture_metrics(); layer_cache_count_ = layer_metrics.total_count(); layer_cache_bytes_ = layer_metrics.total_bytes(); picture_cache_count_ = picture_metrics.total_count(); picture_cache_bytes_ = picture_metrics.total_bytes(); } else { layer_cache_count_ = layer_cache_bytes_ = picture_cache_count_ = picture_cache_bytes_ = 0; } timing_.Set(FrameTiming::kVsyncStart, vsync_start_); timing_.Set(FrameTiming::kBuildStart, build_start_); timing_.Set(FrameTiming::kBuildFinish, build_end_); timing_.Set(FrameTiming::kRasterStart, raster_start_); timing_.Set(FrameTiming::kRasterFinish, raster_end_); timing_.Set(FrameTiming::kRasterFinishWallTime, raster_end_wall_time_); timing_.SetFrameNumber(GetFrameNumber()); timing_.SetRasterCacheStatistics(layer_cache_count_, layer_cache_bytes_, picture_cache_count_, picture_cache_bytes_); return timing_; } FrameTiming FrameTimingsRecorder::GetRecordedTime() const { 
std::scoped_lock state_lock(state_mutex_); FML_DCHECK(state_ == State::kRasterEnd); return timing_; } std::unique_ptr<FrameTimingsRecorder> FrameTimingsRecorder::CloneUntil( State state) { std::scoped_lock state_lock(state_mutex_); std::unique_ptr<FrameTimingsRecorder> recorder = std::make_unique<FrameTimingsRecorder>(frame_number_); FML_DCHECK(state_ >= state); recorder->state_ = state; if (state >= State::kVsync) { recorder->vsync_start_ = vsync_start_; recorder->vsync_target_ = vsync_target_; } if (state >= State::kBuildStart) { recorder->build_start_ = build_start_; } if (state >= State::kBuildEnd) { recorder->build_end_ = build_end_; } if (state >= State::kRasterStart) { recorder->raster_start_ = raster_start_; } if (state >= State::kRasterEnd) { recorder->raster_end_ = raster_end_; recorder->raster_end_wall_time_ = raster_end_wall_time_; recorder->layer_cache_count_ = layer_cache_count_; recorder->layer_cache_bytes_ = layer_cache_bytes_; recorder->picture_cache_count_ = picture_cache_count_; recorder->picture_cache_bytes_ = picture_cache_bytes_; } return recorder; } uint64_t FrameTimingsRecorder::GetFrameNumber() const { return frame_number_; } const char* FrameTimingsRecorder::GetFrameNumberTraceArg() const { return frame_number_trace_arg_val_.c_str(); } void FrameTimingsRecorder::AssertInState(State state) const { FML_DCHECK(state_ == state) << "Expected state " << StateToString(state) << ", actual state " << StateToString(state_); } } // namespace flutter
engine/flow/frame_timings.cc/0
{ "file_path": "engine/flow/frame_timings.cc", "repo_id": "engine", "token_count": 3488 }
151
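Below is a minimal usage sketch (not part of the dataset row above) of the FrameTimingsRecorder API shown in the preceding record. It assumes the Flutter engine source tree for the includes and a publicly constructible recorder; the helper name and the 16 ms vsync target are invented for illustration.

#include "flutter/flow/frame_timings.h"

#include "flutter/fml/time/time_delta.h"
#include "flutter/fml/time/time_point.h"

namespace flutter {

// Drives the recorder through its full state machine:
// kVsync -> kBuildStart -> kBuildEnd -> kRasterStart -> kRasterEnd.
FrameTiming RecordOneSyntheticFrame() {
  FrameTimingsRecorder recorder;  // picks the next global frame number

  const auto vsync_start = fml::TimePoint::Now();
  recorder.RecordVsync(vsync_start,
                       vsync_start + fml::TimeDelta::FromMilliseconds(16));
  recorder.RecordBuildStart(fml::TimePoint::Now());
  recorder.RecordBuildEnd(fml::TimePoint::Now());
  recorder.RecordRasterStart(fml::TimePoint::Now());

  // Passing nullptr zeroes the raster-cache statistics, as in the source.
  const FrameTiming timing = recorder.RecordRasterEnd(/*cache=*/nullptr);

  // Derived values are only valid once the corresponding state was reached.
  const fml::TimeDelta build_time = recorder.GetBuildDuration();
  (void)build_time;
  return timing;
}

}  // namespace flutter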
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef FLUTTER_FLOW_LAYERS_CLIP_RECT_LAYER_H_
#define FLUTTER_FLOW_LAYERS_CLIP_RECT_LAYER_H_

#include "flutter/flow/layers/clip_shape_layer.h"

namespace flutter {

class ClipRectLayer : public ClipShapeLayer<SkRect> {
 public:
  ClipRectLayer(const SkRect& clip_rect, Clip clip_behavior);

 protected:
  const SkRect& clip_shape_bounds() const override;

  void ApplyClip(LayerStateStack::MutatorContext& mutator) const override;

 private:
  FML_DISALLOW_COPY_AND_ASSIGN(ClipRectLayer);
};

}  // namespace flutter

#endif  // FLUTTER_FLOW_LAYERS_CLIP_RECT_LAYER_H_
engine/flow/layers/clip_rect_layer.h/0
{ "file_path": "engine/flow/layers/clip_rect_layer.h", "repo_id": "engine", "token_count": 263 }
152
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef FLUTTER_FLOW_LAYERS_DISPLAY_LIST_RASTER_CACHE_ITEM_H_
#define FLUTTER_FLOW_LAYERS_DISPLAY_LIST_RASTER_CACHE_ITEM_H_

#include <memory>
#include <optional>

#include "flutter/display_list/display_list.h"
#include "flutter/flow/embedded_views.h"
#include "flutter/flow/raster_cache_item.h"
#include "third_party/skia/include/core/SkMatrix.h"
#include "third_party/skia/include/core/SkPoint.h"

namespace flutter {

class DisplayListRasterCacheItem : public RasterCacheItem {
 public:
  DisplayListRasterCacheItem(const sk_sp<DisplayList>& display_list,
                             const SkPoint& offset,
                             bool is_complex = true,
                             bool will_change = false);

  static std::unique_ptr<DisplayListRasterCacheItem> Make(
      const sk_sp<DisplayList>&,
      const SkPoint& offset,
      bool is_complex,
      bool will_change);

  void PrerollSetup(PrerollContext* context, const SkMatrix& matrix) override;

  void PrerollFinalize(PrerollContext* context,
                       const SkMatrix& matrix) override;

  bool Draw(const PaintContext& context, const DlPaint* paint) const override;

  bool Draw(const PaintContext& context,
            DlCanvas* canvas,
            const DlPaint* paint) const override;

  bool TryToPrepareRasterCache(const PaintContext& context,
                               bool parent_cached = false) const override;

  void ModifyMatrix(SkPoint offset) const {
    matrix_ = matrix_.preTranslate(offset.x(), offset.y());
  }

  const DisplayList* display_list() const { return display_list_.get(); }

 private:
  SkMatrix transformation_matrix_;
  sk_sp<DisplayList> display_list_;
  SkPoint offset_;
  bool is_complex_;
  bool will_change_;
};

}  // namespace flutter

#endif  // FLUTTER_FLOW_LAYERS_DISPLAY_LIST_RASTER_CACHE_ITEM_H_
engine/flow/layers/display_list_raster_cache_item.h/0
{ "file_path": "engine/flow/layers/display_list_raster_cache_item.h", "repo_id": "engine", "token_count": 788 }
153
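A brief sketch of the Make() factory declared in the record above; `display_list` is assumed to have been built elsewhere, and the offset and flags are arbitrary illustration values.

#include <memory>

#include "flutter/flow/layers/display_list_raster_cache_item.h"
#include "third_party/skia/include/core/SkPoint.h"

namespace flutter {

// Wraps an existing display list in a cache item anchored at the origin.
std::unique_ptr<DisplayListRasterCacheItem> MakeCacheItem(
    const sk_sp<DisplayList>& display_list) {
  return DisplayListRasterCacheItem::Make(display_list, SkPoint::Make(0, 0),
                                          /*is_complex=*/false,
                                          /*will_change=*/false);
}

}  // namespace flutter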
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "flutter/flow/layers/offscreen_surface.h"

#include <memory>

#include "gtest/gtest.h"
#include "include/core/SkColor.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkData.h"

namespace flutter::testing {

TEST(OffscreenSurfaceTest, EmptySurfaceIsInvalid) {
  auto surface =
      std::make_unique<OffscreenSurface>(nullptr, SkISize::MakeEmpty());
  ASSERT_FALSE(surface->IsValid());
}

TEST(OffscreenSurfaceTest, OnexOneSurfaceIsValid) {
  auto surface =
      std::make_unique<OffscreenSurface>(nullptr, SkISize::Make(1, 1));
  ASSERT_TRUE(surface->IsValid());
}

TEST(OffscreenSurfaceTest, PaintSurfaceBlack) {
  auto surface =
      std::make_unique<OffscreenSurface>(nullptr, SkISize::Make(1, 1));

  DlCanvas* canvas = surface->GetCanvas();
  canvas->Clear(DlColor::kBlack());
  canvas->Flush();

  auto raster_data = surface->GetRasterData(false);
  const uint32_t* actual =
      reinterpret_cast<const uint32_t*>(raster_data->data());

  // picking black as the color since byte ordering seems to matter.
  ASSERT_EQ(actual[0], 0xFF000000u);
}

}  // namespace flutter::testing
engine/flow/layers/offscreen_surface_unittests.cc/0
{ "file_path": "engine/flow/layers/offscreen_surface_unittests.cc", "repo_id": "engine", "token_count": 463 }
154
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/flow/layers/transform_layer.h" #include <optional> namespace flutter { TransformLayer::TransformLayer(const SkM44& transform) : transform_(transform) { // Checks (in some degree) that SkM44 transform_ is valid and initialized. // // If transform_ is uninitialized, this assert may look flaky as it doesn't // fail all the time, and some rerun may make it pass. But don't ignore it and // just rerun the test if this is triggered, since even a flaky failure here // may signify a potentially big problem in the code. // // We have to write this flaky test because there is no reliable way to test // whether a variable is initialized or not in C++. FML_DCHECK(transform_.isFinite()); if (!transform_.isFinite()) { FML_LOG(ERROR) << "TransformLayer is constructed with an invalid matrix."; transform_.setIdentity(); } } void TransformLayer::Diff(DiffContext* context, const Layer* old_layer) { DiffContext::AutoSubtreeRestore subtree(context); auto* prev = static_cast<const TransformLayer*>(old_layer); if (!context->IsSubtreeDirty()) { FML_DCHECK(prev); if (transform_ != prev->transform_) { context->MarkSubtreeDirty(context->GetOldLayerPaintRegion(old_layer)); } } context->PushTransform(transform_); DiffChildren(context, prev); context->SetLayerPaintRegion(this, context->CurrentSubtreeRegion()); } void TransformLayer::Preroll(PrerollContext* context) { auto mutator = context->state_stack.save(); mutator.transform(transform_); SkRect child_paint_bounds = SkRect::MakeEmpty(); PrerollChildren(context, &child_paint_bounds); // We convert to a 3x3 matrix here primarily because the SkM44 object // does not support a mapRect operation. // https://bugs.chromium.org/p/skia/issues/detail?id=11720&q=mapRect&can=2 // // All geometry is X,Y only which means the 3rd row of the 4x4 matrix // is ignored and the output of the 3rd column is also ignored. // So we can transform the rectangle using just the 3x3 SkMatrix // equivalent without any loss of information. // // Performance consideration: // Skia has an internal mapRect for their SkM44 object that is faster // than what SkMatrix does when it has perspective elements. But SkMatrix // is otherwise optimal for non-perspective matrices. If SkM44 ever exposes // a mapRect operation, or if SkMatrix ever optimizes its handling of // the perspective elements, this issue will become moot. transform_.asM33().mapRect(&child_paint_bounds); set_paint_bounds(child_paint_bounds); } void TransformLayer::Paint(PaintContext& context) const { FML_DCHECK(needs_painting(context)); auto mutator = context.state_stack.save(); mutator.transform(transform_); PaintChildren(context); } } // namespace flutter
engine/flow/layers/transform_layer.cc/0
{ "file_path": "engine/flow/layers/transform_layer.cc", "repo_id": "engine", "token_count": 876 }
155
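A small sketch of the bounds mapping that TransformLayer::Preroll performs above: SkM44 exposes no mapRect, so the transform is reduced to its 3x3 equivalent first. The helper name is invented.

#include "third_party/skia/include/core/SkM44.h"
#include "third_party/skia/include/core/SkMatrix.h"
#include "third_party/skia/include/core/SkRect.h"

// Maps child paint bounds through a 4x4 transform the same way Preroll does.
SkRect MapChildBounds(const SkM44& transform, SkRect child_paint_bounds) {
  transform.asM33().mapRect(&child_paint_bounds);
  return child_paint_bounds;
}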
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_FLOW_SKIA_GPU_OBJECT_H_ #define FLUTTER_FLOW_SKIA_GPU_OBJECT_H_ #include <mutex> #include <queue> #include "flutter/fml/memory/ref_counted.h" #include "flutter/fml/memory/weak_ptr.h" #include "flutter/fml/task_runner.h" #include "flutter/fml/trace_event.h" #include "third_party/skia/include/core/SkRefCnt.h" #include "third_party/skia/include/gpu/GrBackendSurface.h" #include "third_party/skia/include/gpu/GrDirectContext.h" #include "third_party/skia/include/gpu/GrTypes.h" namespace flutter { // A queue that holds Skia objects that must be destructed on the given task // runner. template <class T> class UnrefQueue : public fml::RefCountedThreadSafe<UnrefQueue<T>> { public: using ResourceContext = T; void Unref(SkRefCnt* object) { if (drain_immediate_) { object->unref(); return; } std::scoped_lock lock(mutex_); objects_.push_back(object); if (!drain_pending_) { drain_pending_ = true; task_runner_->PostDelayedTask( [strong = fml::Ref(this)]() { strong->Drain(); }, drain_delay_); } } void DeleteTexture(const GrBackendTexture& texture) { // drain_immediate_ should only be used on Impeller. FML_DCHECK(!drain_immediate_); std::scoped_lock lock(mutex_); textures_.push_back(texture); if (!drain_pending_) { drain_pending_ = true; task_runner_->PostDelayedTask( [strong = fml::Ref(this)]() { strong->Drain(); }, drain_delay_); } } // Usually, the drain is called automatically. However, during IO manager // shutdown (when the platform side reference to the OpenGL context is about // to go away), we may need to pre-emptively drain the unref queue. It is the // responsibility of the caller to ensure that no further unrefs are queued // after this call. void Drain() { TRACE_EVENT0("flutter", "SkiaUnrefQueue::Drain"); std::deque<SkRefCnt*> skia_objects; std::deque<GrBackendTexture> textures; { std::scoped_lock lock(mutex_); objects_.swap(skia_objects); textures_.swap(textures); drain_pending_ = false; } DoDrain(skia_objects, textures, context_); } void UpdateResourceContext(sk_sp<ResourceContext> context) { context_ = context; } private: const fml::RefPtr<fml::TaskRunner> task_runner_; const fml::TimeDelta drain_delay_; std::mutex mutex_; std::deque<SkRefCnt*> objects_; std::deque<GrBackendTexture> textures_; bool drain_pending_ = false; sk_sp<ResourceContext> context_; // Enabled when there is an impeller context, which removes the usage of // the queue altogether. bool drain_immediate_; // The `GrDirectContext* context` is only used for signaling Skia to // performDeferredCleanup. It can be nullptr when such signaling is not needed // (e.g., in unit tests). UnrefQueue(fml::RefPtr<fml::TaskRunner> task_runner, fml::TimeDelta delay, sk_sp<ResourceContext> context = nullptr, bool drain_immediate = false) : task_runner_(std::move(task_runner)), drain_delay_(delay), context_(context), drain_immediate_(drain_immediate) {} ~UnrefQueue() { // The ResourceContext must be deleted on the task runner thread. // Transfer ownership of the UnrefQueue's ResourceContext reference // into a task queued to that thread. 
ResourceContext* raw_context = context_.release(); fml::TaskRunner::RunNowOrPostTask( task_runner_, [objects = std::move(objects_), textures = std::move(textures_), raw_context]() mutable { sk_sp<ResourceContext> context(raw_context); DoDrain(objects, textures, context); context.reset(); }); } // static static void DoDrain(const std::deque<SkRefCnt*>& skia_objects, const std::deque<GrBackendTexture>& textures, sk_sp<ResourceContext> context) { for (SkRefCnt* skia_object : skia_objects) { skia_object->unref(); } if (context) { for (const GrBackendTexture& texture : textures) { context->deleteBackendTexture(texture); } if (!skia_objects.empty()) { context->performDeferredCleanup(std::chrono::milliseconds(0)); } context->flushAndSubmit(GrSyncCpu::kYes); } } FML_FRIEND_REF_COUNTED_THREAD_SAFE(UnrefQueue); FML_FRIEND_MAKE_REF_COUNTED(UnrefQueue); FML_DISALLOW_COPY_AND_ASSIGN(UnrefQueue); }; using SkiaUnrefQueue = UnrefQueue<GrDirectContext>; /// An object whose deallocation needs to be performed on an specific unref /// queue. The template argument U need to have a call operator that returns /// that unref queue. template <class T> class SkiaGPUObject { public: using SkiaObjectType = T; SkiaGPUObject() = default; SkiaGPUObject(sk_sp<SkiaObjectType> object, fml::RefPtr<SkiaUnrefQueue> queue) : object_(std::move(object)), queue_(std::move(queue)) { FML_DCHECK(object_); } SkiaGPUObject(SkiaGPUObject&&) = default; ~SkiaGPUObject() { reset(); } SkiaGPUObject& operator=(SkiaGPUObject&&) = default; sk_sp<SkiaObjectType> skia_object() const { return object_; } void reset() { if (object_ && queue_) { queue_->Unref(object_.release()); } queue_ = nullptr; FML_DCHECK(object_ == nullptr); } private: sk_sp<SkiaObjectType> object_; fml::RefPtr<SkiaUnrefQueue> queue_; FML_DISALLOW_COPY_AND_ASSIGN(SkiaGPUObject); }; } // namespace flutter #endif // FLUTTER_FLOW_SKIA_GPU_OBJECT_H_
engine/flow/skia_gpu_object.h/0
{ "file_path": "engine/flow/skia_gpu_object.h", "repo_id": "engine", "token_count": 2201 }
156
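An illustrative sketch of pairing the UnrefQueue and SkiaGPUObject declared above. The IO task runner, the SkImage payload, and the 8 ms drain delay are stand-ins chosen for the example, not values taken from the source.

#include <utility>

#include "flutter/flow/skia_gpu_object.h"
#include "flutter/fml/task_runner.h"
#include "flutter/fml/time/time_delta.h"
#include "third_party/skia/include/core/SkImage.h"

namespace flutter {

// Defers the unref of a GPU-backed Skia object onto the queue's task runner.
void DeferUnref(const fml::RefPtr<fml::TaskRunner>& io_task_runner,
                sk_sp<SkImage> image) {
  auto queue = fml::MakeRefCounted<SkiaUnrefQueue>(
      io_task_runner, fml::TimeDelta::FromMilliseconds(8));

  SkiaGPUObject<SkImage> guarded(std::move(image), queue);
  guarded.reset();  // queues the unref rather than dropping it on this thread

  // During shutdown, pending unrefs can be flushed explicitly.
  queue->Drain();
}

}  // namespace flutter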
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_FLOW_TESTING_DIFF_CONTEXT_TEST_H_ #define FLUTTER_FLOW_TESTING_DIFF_CONTEXT_TEST_H_ #include <utility> #include "flutter/flow/layers/container_layer.h" #include "flutter/flow/layers/display_list_layer.h" #include "flutter/flow/layers/opacity_layer.h" #include "flutter/flow/testing/layer_test.h" namespace flutter { namespace testing { class MockLayerTree { public: explicit MockLayerTree(SkISize size = SkISize::Make(1000, 1000)) : root_(std::make_shared<ContainerLayer>()), size_(size) {} ContainerLayer* root() { return root_.get(); } const ContainerLayer* root() const { return root_.get(); } PaintRegionMap& paint_region_map() { return paint_region_map_; } const PaintRegionMap& paint_region_map() const { return paint_region_map_; } const SkISize& size() const { return size_; } private: std::shared_ptr<ContainerLayer> root_; PaintRegionMap paint_region_map_; SkISize size_; }; class DiffContextTest : public LayerTest { public: DiffContextTest(); Damage DiffLayerTree(MockLayerTree& layer_tree, const MockLayerTree& old_layer_tree, const SkIRect& additional_damage = SkIRect::MakeEmpty(), int horizontal_clip_alignment = 0, int vertical_alignment = 0, bool use_raster_cache = true, bool impeller_enabled = false); // Create display list consisting of filled rect with given color; Being able // to specify different color is useful to test deep comparison of pictures sk_sp<DisplayList> CreateDisplayList(const SkRect& bounds, DlColor color = DlColor::kBlack()); std::shared_ptr<DisplayListLayer> CreateDisplayListLayer( const sk_sp<DisplayList>& display_list, const SkPoint& offset = SkPoint::Make(0, 0)); std::shared_ptr<ContainerLayer> CreateContainerLayer( std::initializer_list<std::shared_ptr<Layer>> layers); std::shared_ptr<ContainerLayer> CreateContainerLayer( std::shared_ptr<Layer> l) { return CreateContainerLayer({std::move(l)}); } std::shared_ptr<OpacityLayer> CreateOpacityLater( std::initializer_list<std::shared_ptr<Layer>> layers, SkAlpha alpha, const SkPoint& offset = SkPoint::Make(0, 0)); }; } // namespace testing } // namespace flutter #endif // FLUTTER_FLOW_TESTING_DIFF_CONTEXT_TEST_H_
engine/flow/testing/diff_context_test.h/0
{ "file_path": "engine/flow/testing/diff_context_test.h", "repo_id": "engine", "token_count": 976 }
157
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//flutter/build/dart/dart.gni")
import("//flutter/common/config.gni")

copy("frontend_server") {
  if (flutter_prebuilt_dart_sdk) {
    snapshot =
        "$host_prebuilt_dart_sdk/bin/snapshots/frontend_server.dart.snapshot"
  } else {
    deps = [ "$dart_src/utils/kernel-service:frontend_server" ]
    snapshot = "$root_out_dir/frontend_server.dart.snapshot"
  }

  sources = [ snapshot ]
  outputs = [ "$root_gen_dir/frontend_server.dart.snapshot" ]
}
engine/flutter_frontend_server/BUILD.gn/0
{ "file_path": "engine/flutter_frontend_server/BUILD.gn", "repo_id": "engine", "token_count": 234 }
158
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/fml/backtrace.h" #include <csignal> #include <sstream> #include "flutter/fml/build_config.h" #include "flutter/fml/logging.h" #include "flutter/fml/paths.h" #include "flutter/third_party/abseil-cpp/absl/debugging/symbolize.h" #ifdef FML_OS_WIN #include <Windows.h> #include <crtdbg.h> #include <debugapi.h> #else // FML_OS_WIN #include <execinfo.h> #endif // FML_OS_WIN namespace fml { static std::string kKUnknownFrameName = "Unknown"; static std::string GetSymbolName(void* symbol) { char name[1024]; if (!absl::Symbolize(symbol, name, sizeof(name))) { return kKUnknownFrameName; } return name; } static int Backtrace(void** symbols, int size) { #if FML_OS_WIN return CaptureStackBackTrace(0, size, symbols, NULL); #else return ::backtrace(symbols, size); #endif // FML_OS_WIN } std::string BacktraceHere(size_t offset) { constexpr size_t kMaxFrames = 256; void* symbols[kMaxFrames]; const auto available_frames = Backtrace(symbols, kMaxFrames); if (available_frames <= 0) { return ""; } // Exclude here. offset += 2; std::stringstream stream; for (int i = offset; i < available_frames; ++i) { stream << "Frame " << i - offset << ": " << symbols[i] << " " << GetSymbolName(symbols[i]) << std::endl; } return stream.str(); } static size_t kKnownSignalHandlers[] = { SIGABRT, // abort program SIGFPE, // floating-point exception SIGTERM, // software termination signal SIGSEGV, // segmentation violation #if !FML_OS_WIN SIGBUS, // bus error SIGSYS, // non-existent system call invoked SIGPIPE, // write on a pipe with no reader SIGALRM, // real-time timer expired #endif // !FML_OS_WIN }; static std::string SignalNameToString(int signal) { switch (signal) { case SIGABRT: return "SIGABRT"; case SIGFPE: return "SIGFPE"; case SIGSEGV: return "SIGSEGV"; case SIGTERM: return "SIGTERM"; #if !FML_OS_WIN case SIGBUS: return "SIGBUS"; case SIGSYS: return "SIGSYS"; case SIGPIPE: return "SIGPIPE"; case SIGALRM: return "SIGALRM"; #endif // !FML_OS_WIN }; return std::to_string(signal); } static void ToggleSignalHandlers(bool set); static void SignalHandler(int signal) { // We are a crash signal handler. This can only happen once. Since we don't // want to catch crashes while we are generating the crash reports, disable // all set signal handlers to their default values before reporting the crash // and re-raising the signal. ToggleSignalHandlers(false); FML_LOG(ERROR) << "Caught signal " << SignalNameToString(signal) << " during program execution." << std::endl << BacktraceHere(3); ::raise(signal); } static void ToggleSignalHandlers(bool set) { for (size_t i = 0; i < sizeof(kKnownSignalHandlers) / sizeof(size_t); ++i) { auto signal_name = kKnownSignalHandlers[i]; auto handler = set ? &SignalHandler : SIG_DFL; if (::signal(signal_name, handler) == SIG_ERR) { FML_LOG(ERROR) << "Could not attach signal handler for " << signal_name; } } } void InstallCrashHandler() { #if FML_OS_WIN if (!IsDebuggerPresent()) { _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); } #endif auto exe_path = fml::paths::GetExecutablePath(); if (exe_path.first) { absl::InitializeSymbolizer(exe_path.second.c_str()); } ToggleSignalHandlers(true); } bool IsCrashHandlingSupported() { return true; } } // namespace fml
engine/fml/backtrace.cc/0
{ "file_path": "engine/fml/backtrace.cc", "repo_id": "engine", "token_count": 1458 }
159
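A minimal sketch of wiring the crash-handling helpers above into a program entry point, assuming these functions are declared in flutter/fml/backtrace.h.

#include <cstdlib>
#include <iostream>

#include "flutter/fml/backtrace.h"

int main(int argc, char** argv) {
  // Install the symbolizer and signal handlers before any real work starts.
  if (fml::IsCrashHandlingSupported()) {
    fml::InstallCrashHandler();
  }

  // Print the current call stack; the offset skips over this frame itself.
  std::cout << fml::BacktraceHere(/*offset=*/0) << std::endl;
  return EXIT_SUCCESS;
}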
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "flutter/fml/hash_combine.h"

#include "flutter/testing/testing.h"

namespace fml {
namespace testing {

TEST(HashCombineTest, CanHash) {
  ASSERT_EQ(HashCombine(), HashCombine());
  ASSERT_EQ(HashCombine("Hello"), HashCombine("Hello"));
  ASSERT_NE(HashCombine("Hello"), HashCombine("World"));
  ASSERT_EQ(HashCombine("Hello", "World"), HashCombine("Hello", "World"));
  ASSERT_NE(HashCombine("World", "Hello"), HashCombine("Hello", "World"));
  ASSERT_EQ(HashCombine(12u), HashCombine(12u));
  ASSERT_NE(HashCombine(12u), HashCombine(12.0f));
  ASSERT_EQ(HashCombine('a'), HashCombine('a'));
}

}  // namespace testing
}  // namespace fml
engine/fml/hash_combine_unittests.cc/0
{ "file_path": "engine/fml/hash_combine_unittests.cc", "repo_id": "engine", "token_count": 290 }
160
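A short sketch of the typical use of HashCombine outside the test above: folding the fields of a struct into a single hash. The CacheKey type is invented for illustration.

#include <cstdint>

#include "flutter/fml/hash_combine.h"

struct CacheKey {
  uint64_t frame_number = 0;
  int width = 0;
  int height = 0;
};

// Combines all fields, mirroring the multi-argument calls in the test.
inline auto HashOf(const CacheKey& key) {
  return fml::HashCombine(key.frame_number, key.width, key.height);
}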
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_FML_MAPPING_H_ #define FLUTTER_FML_MAPPING_H_ #include <initializer_list> #include <memory> #include <string> #include <vector> #include "flutter/fml/build_config.h" #include "flutter/fml/file.h" #include "flutter/fml/macros.h" #include "flutter/fml/native_library.h" #include "flutter/fml/unique_fd.h" namespace fml { class Mapping { public: Mapping(); virtual ~Mapping(); virtual size_t GetSize() const = 0; virtual const uint8_t* GetMapping() const = 0; // Whether calling madvise(DONTNEED) on the mapping is non-destructive. // Generally true for file-mapped memory and false for anonymous memory. virtual bool IsDontNeedSafe() const = 0; private: FML_DISALLOW_COPY_AND_ASSIGN(Mapping); }; class FileMapping final : public Mapping { public: enum class Protection { kRead, kWrite, kExecute, }; explicit FileMapping(const fml::UniqueFD& fd, std::initializer_list<Protection> protection = { Protection::kRead}); ~FileMapping() override; static std::unique_ptr<FileMapping> CreateReadOnly(const std::string& path); static std::unique_ptr<FileMapping> CreateReadOnly( const fml::UniqueFD& base_fd, const std::string& sub_path = ""); static std::unique_ptr<FileMapping> CreateReadExecute( const std::string& path); static std::unique_ptr<FileMapping> CreateReadExecute( const fml::UniqueFD& base_fd, const std::string& sub_path = ""); // |Mapping| size_t GetSize() const override; // |Mapping| const uint8_t* GetMapping() const override; // |Mapping| bool IsDontNeedSafe() const override; uint8_t* GetMutableMapping(); bool IsValid() const; private: bool valid_ = false; size_t size_ = 0; uint8_t* mapping_ = nullptr; uint8_t* mutable_mapping_ = nullptr; #if FML_OS_WIN fml::UniqueFD mapping_handle_; #endif FML_DISALLOW_COPY_AND_ASSIGN(FileMapping); }; class DataMapping final : public Mapping { public: explicit DataMapping(std::vector<uint8_t> data); explicit DataMapping(const std::string& string); ~DataMapping() override; // |Mapping| size_t GetSize() const override; // |Mapping| const uint8_t* GetMapping() const override; // |Mapping| bool IsDontNeedSafe() const override; private: std::vector<uint8_t> data_; FML_DISALLOW_COPY_AND_ASSIGN(DataMapping); }; class NonOwnedMapping final : public Mapping { public: using ReleaseProc = std::function<void(const uint8_t* data, size_t size)>; NonOwnedMapping(const uint8_t* data, size_t size, const ReleaseProc& release_proc = nullptr, bool dontneed_safe = false); ~NonOwnedMapping() override; // |Mapping| size_t GetSize() const override; // |Mapping| const uint8_t* GetMapping() const override; // |Mapping| bool IsDontNeedSafe() const override; private: const uint8_t* const data_; const size_t size_; const ReleaseProc release_proc_; const bool dontneed_safe_; FML_DISALLOW_COPY_AND_ASSIGN(NonOwnedMapping); }; /// A Mapping like NonOwnedMapping, but uses Free as its release proc. class MallocMapping final : public Mapping { public: MallocMapping(); /// Creates a MallocMapping for a region of memory (without copying it). /// The function will `abort()` if the malloc fails. /// @param data The starting address of the mapping. /// @param size The size of the mapping in bytes. MallocMapping(uint8_t* data, size_t size); MallocMapping(fml::MallocMapping&& mapping); ~MallocMapping() override; /// Copies the data from `begin` to `end`. 
/// It's templated since void* arithemetic isn't allowed and we want support /// for `uint8_t` and `char`. template <typename T> static MallocMapping Copy(const T* begin, const T* end) { FML_DCHECK(end >= begin); size_t length = end - begin; return Copy(begin, length); } /// Copies a region of memory into a MallocMapping. /// The function will `abort()` if the malloc fails. /// @param begin The starting address of where we will copy. /// @param length The length of the region to copy in bytes. static MallocMapping Copy(const void* begin, size_t length); // |Mapping| size_t GetSize() const override; // |Mapping| const uint8_t* GetMapping() const override; // |Mapping| bool IsDontNeedSafe() const override; /// Removes ownership of the data buffer. /// After this is called; the mapping will point to nullptr. [[nodiscard]] uint8_t* Release(); private: uint8_t* data_; size_t size_; FML_DISALLOW_COPY_AND_ASSIGN(MallocMapping); }; class SymbolMapping final : public Mapping { public: SymbolMapping(fml::RefPtr<fml::NativeLibrary> native_library, const char* symbol_name); ~SymbolMapping() override; // |Mapping| size_t GetSize() const override; // |Mapping| const uint8_t* GetMapping() const override; // |Mapping| bool IsDontNeedSafe() const override; private: fml::RefPtr<fml::NativeLibrary> native_library_; const uint8_t* mapping_ = nullptr; FML_DISALLOW_COPY_AND_ASSIGN(SymbolMapping); }; } // namespace fml #endif // FLUTTER_FML_MAPPING_H_
engine/fml/mapping.h/0
{ "file_path": "engine/fml/mapping.h", "repo_id": "engine", "token_count": 1935 }
161
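A small sketch of two of the concrete Mapping types declared above; the buffer contents are arbitrary and the snippet assumes the engine tree for the include.

#include <cstdint>
#include <string>
#include <vector>

#include "flutter/fml/mapping.h"

namespace {

void MappingExamples() {
  // Owns a copy of the string's bytes.
  fml::DataMapping from_string(std::string("hello mappings"));

  // Owns a malloc-backed copy of an arbitrary buffer.
  const std::vector<uint8_t> buffer = {1, 2, 3, 4};
  fml::MallocMapping copied =
      fml::MallocMapping::Copy(buffer.data(), buffer.size());

  // Every Mapping exposes the same read-only view.
  const uint8_t* bytes = copied.GetMapping();
  const size_t size = copied.GetSize();
  (void)bytes;
  (void)size;
}

}  // namespace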
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef FLUTTER_FML_MEMORY_WEAK_PTR_INTERNAL_H_
#define FLUTTER_FML_MEMORY_WEAK_PTR_INTERNAL_H_

#include "flutter/fml/macros.h"
#include "flutter/fml/memory/ref_counted.h"

namespace fml {
namespace internal {

// |WeakPtr<T>|s have a reference to a |WeakPtrFlag| to determine whether they
// are valid (non-null) or not. We do not store a |T*| in this object since
// there may also be |WeakPtr<U>|s to the same object, where |U| is a
// superclass of |T|.
//
// This class is not thread-safe, though references may be released on any
// thread (allowing weak pointers to be destroyed/reset/reassigned on any
// thread).
class WeakPtrFlag : public fml::RefCountedThreadSafe<WeakPtrFlag> {
 public:
  WeakPtrFlag();
  ~WeakPtrFlag();

  bool is_valid() const { return is_valid_; }

  void Invalidate();

 private:
  bool is_valid_ = false;

  FML_DISALLOW_COPY_AND_ASSIGN(WeakPtrFlag);
};

}  // namespace internal
}  // namespace fml

#endif  // FLUTTER_FML_MEMORY_WEAK_PTR_INTERNAL_H_
engine/fml/memory/weak_ptr_internal.h/0
{ "file_path": "engine/fml/memory/weak_ptr_internal.h", "repo_id": "engine", "token_count": 402 }
162
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/fml/platform/android/cpu_affinity.h" #include <pthread.h> #include <sys/resource.h> #include <sys/time.h> #include <unistd.h> #include <mutex> #include <optional> #include <thread> #include "flutter/fml/logging.h" namespace fml { /// The CPUSpeedTracker is initialized once the first time a thread affinity is /// requested. std::once_flag gCPUTrackerFlag; static CPUSpeedTracker* gCPUTracker; // For each CPU index provided, attempts to open the file // /sys/devices/system/cpu/cpu$NUM/cpufreq/cpuinfo_max_freq and parse a number // containing the CPU frequency. void InitCPUInfo(size_t cpu_count) { std::vector<CpuIndexAndSpeed> cpu_speeds; for (auto i = 0u; i < cpu_count; i++) { auto path = "/sys/devices/system/cpu/cpu" + std::to_string(i) + "/cpufreq/cpuinfo_max_freq"; auto speed = ReadIntFromFile(path); if (speed.has_value()) { cpu_speeds.push_back({.index = i, .speed = speed.value()}); } } gCPUTracker = new CPUSpeedTracker(cpu_speeds); } bool SetUpCPUTracker() { // Populate CPU Info if uninitialized. auto count = std::thread::hardware_concurrency(); std::call_once(gCPUTrackerFlag, [count]() { InitCPUInfo(count); }); if (gCPUTracker == nullptr || !gCPUTracker->IsValid()) { return false; } return true; } std::optional<size_t> AndroidEfficiencyCoreCount() { if (!SetUpCPUTracker()) { return true; } auto result = gCPUTracker->GetIndices(CpuAffinity::kEfficiency).size(); FML_DCHECK(result > 0); return result; } bool AndroidRequestAffinity(CpuAffinity affinity) { if (!SetUpCPUTracker()) { return true; } cpu_set_t set; CPU_ZERO(&set); for (const auto index : gCPUTracker->GetIndices(affinity)) { CPU_SET(index, &set); } return sched_setaffinity(gettid(), sizeof(set), &set) == 0; } } // namespace fml
engine/fml/platform/android/cpu_affinity.cc/0
{ "file_path": "engine/fml/platform/android/cpu_affinity.cc", "repo_id": "engine", "token_count": 724 }
163
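A hedged sketch of calling the Android affinity helpers above; it assumes the CpuAffinity enum is visible through the included header, as the implementation implies, and the helper name is invented.

#include "flutter/fml/platform/android/cpu_affinity.h"

namespace fml {

// Requests efficiency cores only when the CPU speed tracker found any.
void PinToEfficiencyCoresIfPossible() {
  if (AndroidEfficiencyCoreCount().value_or(0) > 0) {
    AndroidRequestAffinity(CpuAffinity::kEfficiency);
  }
}

}  // namespace fml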
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_FML_PLATFORM_DARWIN_MESSAGE_LOOP_DARWIN_H_ #define FLUTTER_FML_PLATFORM_DARWIN_MESSAGE_LOOP_DARWIN_H_ #include <CoreFoundation/CoreFoundation.h> #include <atomic> #include "flutter/fml/macros.h" #include "flutter/fml/message_loop_impl.h" #include "flutter/fml/platform/darwin/cf_utils.h" namespace fml { class MessageLoopDarwin : public MessageLoopImpl { public: // A custom CFRunLoop mode used when processing flutter messages, // so that the CFRunLoop can be run without being interrupted by UIKit, // while still being able to receive and be interrupted by framework messages. static CFStringRef kMessageLoopCFRunLoopMode; private: std::atomic_bool running_; CFRef<CFRunLoopTimerRef> delayed_wake_timer_; CFRef<CFRunLoopRef> loop_; MessageLoopDarwin(); ~MessageLoopDarwin() override; // |fml::MessageLoopImpl| void Run() override; // |fml::MessageLoopImpl| void Terminate() override; // |fml::MessageLoopImpl| void WakeUp(fml::TimePoint time_point) override; static void OnTimerFire(CFRunLoopTimerRef timer, MessageLoopDarwin* loop); FML_FRIEND_MAKE_REF_COUNTED(MessageLoopDarwin); FML_FRIEND_REF_COUNTED_THREAD_SAFE(MessageLoopDarwin); FML_DISALLOW_COPY_AND_ASSIGN(MessageLoopDarwin); }; } // namespace fml #endif // FLUTTER_FML_PLATFORM_DARWIN_MESSAGE_LOOP_DARWIN_H_
engine/fml/platform/darwin/message_loop_darwin.h/0
{ "file_path": "engine/fml/platform/darwin/message_loop_darwin.h", "repo_id": "engine", "token_count": 529 }
164
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "flutter/fml/platform/darwin/string_range_sanitization.h"

namespace fml {

NSRange RangeForCharacterAtIndex(NSString* text, NSUInteger index) {
  if (text == nil || index > text.length) {
    return NSMakeRange(NSNotFound, 0);
  }
  if (index < text.length) {
    return [text rangeOfComposedCharacterSequenceAtIndex:index];
  }
  return NSMakeRange(index, 0);
}

NSRange RangeForCharactersInRange(NSString* text, NSRange range) {
  if (text == nil || range.location + range.length > text.length) {
    return NSMakeRange(NSNotFound, 0);
  }
  NSRange sanitizedRange =
      [text rangeOfComposedCharacterSequencesForRange:range];
  // We don't want to override the length, we just want to make sure we don't
  // select into the middle of a multi-byte character. Taking the
  // `sanitizedRange`'s length will end up altering the actual selection.
  return NSMakeRange(sanitizedRange.location, range.length);
}

}  // namespace fml
engine/fml/platform/darwin/string_range_sanitization.mm/0
{ "file_path": "engine/fml/platform/darwin/string_range_sanitization.mm", "repo_id": "engine", "token_count": 337 }
165
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/fml/platform/linux/message_loop_linux.h" #include <sys/epoll.h> #include <unistd.h> #include "flutter/fml/eintr_wrapper.h" #include "flutter/fml/platform/linux/timerfd.h" namespace fml { static constexpr int kClockType = CLOCK_MONOTONIC; MessageLoopLinux::MessageLoopLinux() : epoll_fd_(FML_HANDLE_EINTR(::epoll_create(1 /* unused */))), timer_fd_(::timerfd_create(kClockType, TFD_NONBLOCK | TFD_CLOEXEC)) { FML_CHECK(epoll_fd_.is_valid()); FML_CHECK(timer_fd_.is_valid()); bool added_source = AddOrRemoveTimerSource(true); FML_CHECK(added_source); } MessageLoopLinux::~MessageLoopLinux() { bool removed_source = AddOrRemoveTimerSource(false); FML_CHECK(removed_source); } bool MessageLoopLinux::AddOrRemoveTimerSource(bool add) { struct epoll_event event = {}; event.events = EPOLLIN; // The data is just for informational purposes so we know when we were worken // by the FD. event.data.fd = timer_fd_.get(); int ctl_result = ::epoll_ctl(epoll_fd_.get(), add ? EPOLL_CTL_ADD : EPOLL_CTL_DEL, timer_fd_.get(), &event); return ctl_result == 0; } // |fml::MessageLoopImpl| void MessageLoopLinux::Run() { running_ = true; while (running_) { struct epoll_event event = {}; int epoll_result = FML_HANDLE_EINTR( ::epoll_wait(epoll_fd_.get(), &event, 1, -1 /* timeout */)); // Errors are fatal. if (event.events & (EPOLLERR | EPOLLHUP)) { running_ = false; continue; } // Timeouts are fatal since we specified an infinite timeout already. // Likewise, > 1 is not possible since we waited for one result. if (epoll_result != 1) { running_ = false; continue; } if (event.data.fd == timer_fd_.get()) { OnEventFired(); } } } // |fml::MessageLoopImpl| void MessageLoopLinux::Terminate() { running_ = false; WakeUp(fml::TimePoint::Now()); } // |fml::MessageLoopImpl| void MessageLoopLinux::WakeUp(fml::TimePoint time_point) { bool result = TimerRearm(timer_fd_.get(), time_point); (void)result; FML_DCHECK(result); } void MessageLoopLinux::OnEventFired() { if (TimerDrain(timer_fd_.get())) { RunExpiredTasksNow(); } } } // namespace fml
engine/fml/platform/linux/message_loop_linux.cc/0
{ "file_path": "engine/fml/platform/linux/message_loop_linux.cc", "repo_id": "engine", "token_count": 918 }
166
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_FML_RASTER_THREAD_MERGER_H_ #define FLUTTER_FML_RASTER_THREAD_MERGER_H_ #include <condition_variable> #include <mutex> #include "flutter/fml/macros.h" #include "flutter/fml/memory/ref_counted.h" #include "flutter/fml/message_loop_task_queues.h" #include "flutter/fml/shared_thread_merger.h" namespace fml { class MessageLoopImpl; enum class RasterThreadStatus { kRemainsMerged, kRemainsUnmerged, kUnmergedNow }; /// This class is a client and proxy between the rasterizer and /// |SharedThreadMerger|. The multiple |RasterThreadMerger| instances with same /// owner_queue_id and same subsumed_queue_id share the same /// |SharedThreadMerger| instance. Whether they share the same inner instance is /// determined by |RasterThreadMerger::CreateOrShareThreadMerger| method. class RasterThreadMerger : public fml::RefCountedThreadSafe<RasterThreadMerger> { public: // Merges the raster thread into platform thread for the duration of // the lease term. Lease is managed by the caller by either calling // |ExtendLeaseTo| or |DecrementLease|. // When the caller merges with a lease term of say 2. The threads // are going to remain merged until 2 invocations of |DecreaseLease|, // unless an |ExtendLeaseTo| gets called. // // If the task queues are the same, we consider them statically merged. // When task queues are statically merged this method becomes no-op. void MergeWithLease(size_t lease_term); // Gets the shared merger from current merger object const fml::RefPtr<SharedThreadMerger>& GetSharedRasterThreadMerger() const; /// Creates a new merger from parent, share the inside shared_merger member /// when the platform_queue_id and raster_queue_id are same, otherwise create /// a new shared_merger instance static fml::RefPtr<fml::RasterThreadMerger> CreateOrShareThreadMerger( const fml::RefPtr<fml::RasterThreadMerger>& parent_merger, TaskQueueId platform_id, TaskQueueId raster_id); // Un-merges the threads now if current caller is the last merged caller, // and it resets the lease term to 0, otherwise it will remove // the caller record and return. The multiple caller records were recorded // after |MergeWithLease| or |ExtendLeaseTo| method. // // Must be executed on the raster task runner. // // If the task queues are the same, we consider them statically merged. // When task queues are statically merged, we never unmerge them and // this method becomes no-op. void UnMergeNowIfLastOne(); // If the task queues are the same, we consider them statically merged. // When task queues are statically merged this method becomes no-op. void ExtendLeaseTo(size_t lease_term); // Returns |RasterThreadStatus::kUnmergedNow| if this call resulted in // splitting the raster and platform threads. Reduces the lease term by 1. // // If the task queues are the same, we consider them statically merged. // When task queues are statically merged this method becomes no-op. RasterThreadStatus DecrementLease(); // The method is locked by current instance, and asks the shared instance of // SharedThreadMerger and the merging state is determined by the // lease_term_ counter. bool IsMerged(); // Waits until the threads are merged. // // Must run on the platform task runner. void WaitUntilMerged(); // Returns true if the current thread owns rasterizing. // When the threads are merged, platform thread owns rasterizing. 
// When un-merged, raster thread owns rasterizing. bool IsOnRasterizingThread(); // Returns true if the current thread is the platform thread. bool IsOnPlatformThread() const; // Enables the thread merger. void Enable(); // Disables the thread merger. Once disabled, any call to // |MergeWithLease| or |UnMergeNowIfLastOne| results in a noop. void Disable(); // Whether the thread merger is enabled. By default, the thread merger is // enabled. If false, calls to |MergeWithLease| or |UnMergeNowIfLastOne| // or |ExtendLeaseTo| or |DecrementLease| results in a noop. bool IsEnabled(); // Registers a callback that can be used to clean up global state right after // the thread configuration has changed. // // For example, it can be used to clear the GL context so it can be used in // the next task from a different thread. void SetMergeUnmergeCallback(const fml::closure& callback); private: fml::TaskQueueId platform_queue_id_; fml::TaskQueueId gpu_queue_id_; RasterThreadMerger(fml::TaskQueueId platform_queue_id, fml::TaskQueueId gpu_queue_id); RasterThreadMerger(fml::RefPtr<fml::SharedThreadMerger> shared_merger, fml::TaskQueueId platform_queue_id, fml::TaskQueueId gpu_queue_id); const fml::RefPtr<fml::SharedThreadMerger> shared_merger_; std::condition_variable merged_condition_; std::mutex mutex_; fml::closure merge_unmerge_callback_; bool IsMergedUnSafe() const; bool IsEnabledUnSafe() const; // The platform_queue_id and gpu_queue_id are exactly the same. // We consider the threads are always merged and cannot be unmerged. bool TaskQueuesAreSame() const; FML_FRIEND_REF_COUNTED_THREAD_SAFE(RasterThreadMerger); FML_FRIEND_MAKE_REF_COUNTED(RasterThreadMerger); FML_DISALLOW_COPY_AND_ASSIGN(RasterThreadMerger); }; } // namespace fml #endif // FLUTTER_FML_RASTER_THREAD_MERGER_H_
engine/fml/raster_thread_merger.h/0
{ "file_path": "engine/fml/raster_thread_merger.h", "repo_id": "engine", "token_count": 1745 }
167
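An illustrative sketch of the lease-based API above. The task queue ids are assumed to come from the platform and raster message loops, and passing a null parent to CreateOrShareThreadMerger to obtain a fresh shared merger is an assumption; in the engine these calls also run on specific task runners.

#include "flutter/fml/raster_thread_merger.h"

namespace fml {

// Merges the raster thread into the platform thread for a one-frame lease.
void MergeForOneFrame(TaskQueueId platform_queue_id,
                      TaskQueueId raster_queue_id) {
  fml::RefPtr<RasterThreadMerger> merger =
      RasterThreadMerger::CreateOrShareThreadMerger(nullptr, platform_queue_id,
                                                    raster_queue_id);

  merger->MergeWithLease(/*lease_term=*/1);
  if (merger->IsMerged()) {
    // ... platform-thread work that also rasterizes would run here ...
  }
  merger->DecrementLease();  // lease hits zero; the threads split again
}

}  // namespace fml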
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <thread>

#include "flutter/fml/synchronization/semaphore.h"
#include "flutter/fml/thread.h"
#include "flutter/fml/time/time_point.h"
#include "gtest/gtest.h"

TEST(SemaphoreTest, SimpleValidity) {
  fml::Semaphore sem(100);
  ASSERT_TRUE(sem.IsValid());
}

TEST(SemaphoreTest, WaitOnZero) {
  fml::Semaphore sem(0);
  ASSERT_FALSE(sem.TryWait());
}

TEST(SemaphoreTest, WaitOnZeroSignalThenWait) {
  fml::Semaphore sem(0);
  ASSERT_FALSE(sem.TryWait());
  std::thread thread([&sem]() { sem.Signal(); });
  thread.join();
  ASSERT_TRUE(sem.TryWait());
  ASSERT_FALSE(sem.TryWait());
}

TEST(SemaphoreTest, IndefiniteWait) {
  auto start = fml::TimePoint::Now();
  constexpr double wait_in_seconds = 0.25;
  fml::Semaphore sem(0);
  ASSERT_TRUE(sem.IsValid());
  fml::Thread signaller("signaller_thread");
  signaller.GetTaskRunner()->PostTaskForTime(
      [&sem]() { sem.Signal(); },
      start + fml::TimeDelta::FromSecondsF(wait_in_seconds));
  ASSERT_TRUE(sem.Wait());
  auto delta = fml::TimePoint::Now() - start;
  ASSERT_GE(delta.ToSecondsF(), wait_in_seconds);
  signaller.Join();
}
engine/fml/synchronization/semaphore_unittest.cc/0
{ "file_path": "engine/fml/synchronization/semaphore_unittest.cc", "repo_id": "engine", "token_count": 492 }
168
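A small sketch, outside the test binary, of the producer/consumer pattern the tests above exercise; the function name is invented.

#include <thread>

#include "flutter/fml/synchronization/semaphore.h"

// Blocks the calling thread until a worker signals completion.
void WaitForProducer() {
  fml::Semaphore ready(0);

  std::thread producer([&ready]() {
    // ... produce something ...
    ready.Signal();
  });

  const bool signaled = ready.IsValid() && ready.Wait();
  (void)signaled;
  producer.join();
}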
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <atomic> #include <thread> #include "flutter/fml/macros.h" #include "flutter/fml/task_source.h" #include "flutter/fml/time/chrono_timestamp_provider.h" #include "flutter/fml/time/time_delta.h" #include "flutter/fml/time/time_point.h" #include "gtest/gtest.h" namespace fml { namespace testing { TEST(TaskSourceTests, SimpleInitialization) { TaskSource task_source = TaskSource(TaskQueueId(1)); task_source.RegisterTask( {1, [] {}, ChronoTicksSinceEpoch(), TaskSourceGrade::kUnspecified}); ASSERT_EQ(task_source.GetNumPendingTasks(), 1u); } TEST(TaskSourceTests, MultipleTaskGrades) { TaskSource task_source = TaskSource(TaskQueueId(1)); task_source.RegisterTask( {1, [] {}, ChronoTicksSinceEpoch(), TaskSourceGrade::kUnspecified}); task_source.RegisterTask( {2, [] {}, ChronoTicksSinceEpoch(), TaskSourceGrade::kUserInteraction}); task_source.RegisterTask( {3, [] {}, ChronoTicksSinceEpoch(), TaskSourceGrade::kDartEventLoop}); ASSERT_EQ(task_source.GetNumPendingTasks(), 3u); } TEST(TaskSourceTests, SimpleOrdering) { TaskSource task_source = TaskSource(TaskQueueId(1)); auto time_stamp = ChronoTicksSinceEpoch(); int value = 0; task_source.RegisterTask( {1, [&] { value = 1; }, time_stamp, TaskSourceGrade::kUnspecified}); task_source.RegisterTask({2, [&] { value = 7; }, time_stamp + fml::TimeDelta::FromMilliseconds(1), TaskSourceGrade::kUnspecified}); task_source.Top().task.GetTask()(); task_source.PopTask(TaskSourceGrade::kUnspecified); ASSERT_EQ(value, 1); task_source.Top().task.GetTask()(); task_source.PopTask(TaskSourceGrade::kUnspecified); ASSERT_EQ(value, 7); } TEST(TaskSourceTests, SimpleOrderingMultiTaskHeaps) { TaskSource task_source = TaskSource(TaskQueueId(1)); auto time_stamp = ChronoTicksSinceEpoch(); int value = 0; task_source.RegisterTask( {1, [&] { value = 1; }, time_stamp, TaskSourceGrade::kDartEventLoop}); task_source.RegisterTask({2, [&] { value = 7; }, time_stamp + fml::TimeDelta::FromMilliseconds(1), TaskSourceGrade::kUserInteraction}); auto top_task = task_source.Top(); top_task.task.GetTask()(); task_source.PopTask(top_task.task.GetTaskSourceGrade()); ASSERT_EQ(value, 1); auto second_task = task_source.Top(); second_task.task.GetTask()(); task_source.PopTask(second_task.task.GetTaskSourceGrade()); ASSERT_EQ(value, 7); } TEST(TaskSourceTests, OrderingMultiTaskHeapsSecondaryPaused) { TaskSource task_source = TaskSource(TaskQueueId(1)); auto time_stamp = ChronoTicksSinceEpoch(); int value = 0; task_source.RegisterTask( {1, [&] { value = 1; }, time_stamp, TaskSourceGrade::kDartEventLoop}); task_source.RegisterTask({2, [&] { value = 7; }, time_stamp + fml::TimeDelta::FromMilliseconds(1), TaskSourceGrade::kUserInteraction}); task_source.PauseSecondary(); auto top_task = task_source.Top(); top_task.task.GetTask()(); task_source.PopTask(top_task.task.GetTaskSourceGrade()); ASSERT_EQ(value, 7); ASSERT_TRUE(task_source.IsEmpty()); task_source.ResumeSecondary(); auto second_task = task_source.Top(); second_task.task.GetTask()(); task_source.PopTask(second_task.task.GetTaskSourceGrade()); ASSERT_EQ(value, 1); } } // namespace testing } // namespace fml
engine/fml/task_source_unittests.cc/0
{ "file_path": "engine/fml/task_source_unittests.cc", "repo_id": "engine", "token_count": 1401 }
169
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_FML_UNIQUE_FD_H_ #define FLUTTER_FML_UNIQUE_FD_H_ #include "flutter/fml/build_config.h" #include "flutter/fml/unique_object.h" #if FML_OS_WIN #include <windows.h> #include <map> #include <mutex> #include <optional> #else // FML_OS_WIN #include <dirent.h> #include <unistd.h> #endif // FML_OS_WIN namespace fml { namespace internal { #if FML_OS_WIN namespace os_win { struct DirCacheEntry { std::wstring filename; FILE_ID_128 id; }; // The order of these is important. Must come before UniqueFDTraits struct // else linker error. Embedding in struct also causes linker error. struct UniqueFDTraits { static std::mutex file_map_mutex; static std::map<HANDLE, DirCacheEntry> file_map; static HANDLE InvalidValue() { return INVALID_HANDLE_VALUE; } static bool IsValid(HANDLE value) { return value != InvalidValue(); } static void Free_Handle(HANDLE fd); static void Free(HANDLE fd) { RemoveCacheEntry(fd); UniqueFDTraits::Free_Handle(fd); } static void RemoveCacheEntry(HANDLE fd) { const std::lock_guard<std::mutex> lock(file_map_mutex); file_map.erase(fd); } static void StoreCacheEntry(HANDLE fd, DirCacheEntry state) { const std::lock_guard<std::mutex> lock(file_map_mutex); file_map[fd] = state; } static std::optional<DirCacheEntry> GetCacheEntry(HANDLE fd) { const std::lock_guard<std::mutex> lock(file_map_mutex); auto found = file_map.find(fd); return found == file_map.end() ? std::nullopt : std::optional<DirCacheEntry>{found->second}; } }; } // namespace os_win #else // FML_OS_WIN namespace os_unix { struct UniqueFDTraits { static int InvalidValue() { return -1; } static bool IsValid(int value) { return value >= 0; } static void Free(int fd); }; struct UniqueDirTraits { static DIR* InvalidValue() { return nullptr; } static bool IsValid(DIR* value) { return value != nullptr; } static void Free(DIR* dir); }; } // namespace os_unix #endif // FML_OS_WIN } // namespace internal #if FML_OS_WIN using UniqueFD = UniqueObject<HANDLE, internal::os_win::UniqueFDTraits>; #else // FML_OS_WIN using UniqueFD = UniqueObject<int, internal::os_unix::UniqueFDTraits>; using UniqueDir = UniqueObject<DIR*, internal::os_unix::UniqueDirTraits>; #endif // FML_OS_WIN } // namespace fml #endif // FLUTTER_FML_UNIQUE_FD_H_
engine/fml/unique_fd.h/0
{ "file_path": "engine/fml/unique_fd.h", "repo_id": "engine", "token_count": 954 }
170
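A POSIX-only sketch of the UniqueFD wrapper declared above: the descriptor is closed by the traits' Free() when the object leaves scope. The helper name is invented.

#include <fcntl.h>

#include "flutter/fml/unique_fd.h"

// Returns true if the file could be opened for reading; the descriptor is
// released automatically when `fd` is destroyed.
bool CanOpenForRead(const char* path) {
  fml::UniqueFD fd(::open(path, O_RDONLY | O_CLOEXEC));
  return fd.is_valid();
}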
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef FLUTTER_IMPELLER_AIKS_AIKS_PLAYGROUND_INSPECTOR_H_
#define FLUTTER_IMPELLER_AIKS_AIKS_PLAYGROUND_INSPECTOR_H_

#include <functional>
#include <optional>

#include "flutter/fml/macros.h"
#include "impeller/aiks/aiks_context.h"
#include "impeller/aiks/picture.h"
#include "impeller/core/capture.h"
#include "impeller/renderer/context.h"

namespace impeller {

class AiksInspector {
 public:
  AiksInspector();

  const std::optional<Picture>& RenderInspector(
      AiksContext& aiks_context,
      const std::function<std::optional<Picture>()>& picture_callback);

  // Resets (releases) the underlying |Picture| object.
  //
  // Underlying issue: <https://github.com/flutter/flutter/issues/134678>.
  //
  // The tear-down code is not running in the right order; we still have a
  // reference to the |Picture| object when the |Context| is being destroyed,
  // which causes the |Texture| objects to leak.
  //
  // TODO(matanlurey): https://github.com/flutter/flutter/issues/134748.
  void HackResetDueToTextureLeaks();

 private:
  void RenderCapture(CaptureContext& capture_context);
  void RenderCaptureElement(CaptureElement& element);

  bool capturing_ = false;
  bool wireframe_ = false;
  CaptureElement* hovered_element_ = nullptr;
  CaptureElement* selected_element_ = nullptr;
  std::optional<Picture> last_picture_;

  AiksInspector(const AiksInspector&) = delete;

  AiksInspector& operator=(const AiksInspector&) = delete;
};

};  // namespace impeller

#endif  // FLUTTER_IMPELLER_AIKS_AIKS_PLAYGROUND_INSPECTOR_H_
engine/impeller/aiks/aiks_playground_inspector.h/0
{ "file_path": "engine/impeller/aiks/aiks_playground_inspector.h", "repo_id": "engine", "token_count": 588 }
171
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/aiks/image_filter.h" #include "impeller/entity/contents/filters/color_filter_contents.h" #include "impeller/entity/contents/filters/filter_contents.h" #include "impeller/entity/contents/filters/inputs/filter_input.h" namespace impeller { /******************************************************************************* ******* ImageFilter ******************************************************************************/ ImageFilter::ImageFilter() = default; ImageFilter::~ImageFilter() = default; std::shared_ptr<ImageFilter> ImageFilter::MakeBlur( Sigma sigma_x, Sigma sigma_y, FilterContents::BlurStyle blur_style, Entity::TileMode tile_mode) { return std::make_shared<BlurImageFilter>(sigma_x, sigma_y, blur_style, tile_mode); } std::shared_ptr<ImageFilter> ImageFilter::MakeDilate(Radius radius_x, Radius radius_y) { return std::make_shared<DilateImageFilter>(radius_x, radius_y); } std::shared_ptr<ImageFilter> ImageFilter::MakeErode(Radius radius_x, Radius radius_y) { return std::make_shared<ErodeImageFilter>(radius_x, radius_y); } std::shared_ptr<ImageFilter> ImageFilter::MakeMatrix( const Matrix& matrix, SamplerDescriptor sampler_descriptor) { return std::make_shared<MatrixImageFilter>(matrix, std::move(sampler_descriptor)); } std::shared_ptr<ImageFilter> ImageFilter::MakeCompose( const ImageFilter& inner, const ImageFilter& outer) { return std::make_shared<ComposeImageFilter>(inner, outer); } std::shared_ptr<ImageFilter> ImageFilter::MakeFromColorFilter( const ColorFilter& color_filter) { return std::make_shared<ColorImageFilter>(color_filter); } std::shared_ptr<ImageFilter> ImageFilter::MakeLocalMatrix( const Matrix& matrix, const ImageFilter& internal_filter) { return std::make_shared<LocalMatrixImageFilter>(matrix, internal_filter); } std::shared_ptr<FilterContents> ImageFilter::GetFilterContents() const { return WrapInput(FilterInput::Make(Rect())); } /******************************************************************************* ******* BlurImageFilter ******************************************************************************/ BlurImageFilter::BlurImageFilter(Sigma sigma_x, Sigma sigma_y, FilterContents::BlurStyle blur_style, Entity::TileMode tile_mode) : sigma_x_(sigma_x), sigma_y_(sigma_y), blur_style_(blur_style), tile_mode_(tile_mode) {} BlurImageFilter::~BlurImageFilter() = default; std::shared_ptr<FilterContents> BlurImageFilter::WrapInput( const FilterInput::Ref& input) const { return FilterContents::MakeGaussianBlur(input, sigma_x_, sigma_y_, tile_mode_, blur_style_); } std::shared_ptr<ImageFilter> BlurImageFilter::Clone() const { return std::make_shared<BlurImageFilter>(*this); } /******************************************************************************* ******* DilateImageFilter ******************************************************************************/ DilateImageFilter::DilateImageFilter(Radius radius_x, Radius radius_y) : radius_x_(radius_x), radius_y_(radius_y) {} DilateImageFilter::~DilateImageFilter() = default; std::shared_ptr<FilterContents> DilateImageFilter::WrapInput( const FilterInput::Ref& input) const { return FilterContents::MakeMorphology(input, radius_x_, radius_y_, FilterContents::MorphType::kDilate); } std::shared_ptr<ImageFilter> DilateImageFilter::Clone() const { return std::make_shared<DilateImageFilter>(*this); } 
/******************************************************************************* ******* ErodeImageFilter ******************************************************************************/ ErodeImageFilter::ErodeImageFilter(Radius radius_x, Radius radius_y) : radius_x_(radius_x), radius_y_(radius_y) {} ErodeImageFilter::~ErodeImageFilter() = default; std::shared_ptr<FilterContents> ErodeImageFilter::WrapInput( const FilterInput::Ref& input) const { return FilterContents::MakeMorphology(input, radius_x_, radius_y_, FilterContents::MorphType::kErode); } std::shared_ptr<ImageFilter> ErodeImageFilter::Clone() const { return std::make_shared<ErodeImageFilter>(*this); } /******************************************************************************* ******* MatrixImageFilter ******************************************************************************/ MatrixImageFilter::MatrixImageFilter(const Matrix& matrix, SamplerDescriptor sampler_descriptor) : matrix_(matrix), sampler_descriptor_(std::move(sampler_descriptor)) {} MatrixImageFilter::~MatrixImageFilter() = default; std::shared_ptr<FilterContents> MatrixImageFilter::WrapInput( const FilterInput::Ref& input) const { return FilterContents::MakeMatrixFilter(input, matrix_, sampler_descriptor_); } std::shared_ptr<ImageFilter> MatrixImageFilter::Clone() const { return std::make_shared<MatrixImageFilter>(*this); } /******************************************************************************* ******* ComposeImageFilter ******************************************************************************/ ComposeImageFilter::ComposeImageFilter(const ImageFilter& inner, const ImageFilter& outer) : inner_(inner.Clone()), outer_(outer.Clone()) {} ComposeImageFilter::~ComposeImageFilter() = default; std::shared_ptr<FilterContents> ComposeImageFilter::WrapInput( const FilterInput::Ref& input) const { return outer_->WrapInput(FilterInput::Make(inner_->WrapInput(input))); } std::shared_ptr<ImageFilter> ComposeImageFilter::Clone() const { return std::make_shared<ComposeImageFilter>(*this); } /******************************************************************************* ******* ColorImageFilter ******************************************************************************/ ColorImageFilter::ColorImageFilter(const ColorFilter& color_filter) : color_filter_(color_filter.Clone()) {} ColorImageFilter::~ColorImageFilter() = default; std::shared_ptr<FilterContents> ColorImageFilter::WrapInput( const FilterInput::Ref& input) const { return color_filter_->WrapWithGPUColorFilter( input, ColorFilterContents::AbsorbOpacity::kNo); } std::shared_ptr<ImageFilter> ColorImageFilter::Clone() const { return std::make_shared<ColorImageFilter>(*this); } /******************************************************************************* ******* LocalMatrixImageFilter ******************************************************************************/ LocalMatrixImageFilter::LocalMatrixImageFilter( const Matrix& matrix, const ImageFilter& internal_filter) : matrix_(matrix), internal_filter_(internal_filter.Clone()) {} LocalMatrixImageFilter::~LocalMatrixImageFilter() = default; std::shared_ptr<FilterContents> LocalMatrixImageFilter::WrapInput( const FilterInput::Ref& input) const { return FilterContents::MakeLocalMatrixFilter( FilterInput::Make(internal_filter_->WrapInput(input)), matrix_); } std::shared_ptr<ImageFilter> LocalMatrixImageFilter::Clone() const { return std::make_shared<LocalMatrixImageFilter>(*this); } } // namespace impeller
engine/impeller/aiks/image_filter.cc/0
{ "file_path": "engine/impeller/aiks/image_filter.cc", "repo_id": "engine", "token_count": 2565 }
172
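The factories above compose naturally; the sketch below chains a blur into a dilate using only the constructors declared in this file. The specific `Sigma`/`Radius` values and the helper name are illustrative, and the availability of `Sigma`/`Radius` through the header's transitive includes is assumed:

```cpp
#include "impeller/aiks/image_filter.h"

// Hypothetical helper: soften with a Gaussian blur, then thicken with a
// dilate morphology filter.
std::shared_ptr<impeller::ImageFilter> MakeSoftenThenThicken() {
  auto blur = impeller::ImageFilter::MakeBlur(
      impeller::Sigma(2.0f), impeller::Sigma(2.0f),
      impeller::FilterContents::BlurStyle::kNormal,
      impeller::Entity::TileMode::kDecal);
  auto dilate = impeller::ImageFilter::MakeDilate(impeller::Radius(1.5f),
                                                  impeller::Radius(1.5f));
  // Per ComposeImageFilter::WrapInput above, `inner` is applied to the input
  // first and `outer` wraps the result: outer(inner(input)).
  return impeller::ImageFilter::MakeCompose(/*inner=*/*blur,
                                            /*outer=*/*dilate);
}
```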
# Copyright 2013 The Flutter Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import("../tools/impeller.gni") impeller_component("base") { sources = [ "allocation.cc", "allocation.h", "backend_cast.h", "comparable.cc", "comparable.h", "config.h", "mask.h", "promise.cc", "promise.h", "strings.cc", "strings.h", "thread.cc", "thread.h", "thread_safety.cc", "thread_safety.h", "timing.h", "validation.cc", "validation.h", "version.cc", "version.h", ] deps = [ "//flutter/fml" ] } impeller_component("base_unittests") { testonly = true sources = [ "base_unittests.cc" ] deps = [ ":base", "//flutter/testing", ] }
engine/impeller/base/BUILD.gn/0
{ "file_path": "engine/impeller/base/BUILD.gn", "repo_id": "engine", "token_count": 352 }
173
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <cstring> #include "flutter/testing/testing.h" #include "gtest/gtest.h" #include "impeller/base/validation.h" #include "impeller/compiler/compiler.h" #include "impeller/compiler/compiler_test.h" #include "impeller/compiler/source_options.h" #include "impeller/compiler/types.h" namespace impeller { namespace compiler { namespace testing { TEST(CompilerTest, ShaderKindMatchingIsSuccessful) { ASSERT_EQ(SourceTypeFromFileName("hello.vert"), SourceType::kVertexShader); ASSERT_EQ(SourceTypeFromFileName("hello.frag"), SourceType::kFragmentShader); ASSERT_EQ(SourceTypeFromFileName("hello.comp"), SourceType::kComputeShader); ASSERT_EQ(SourceTypeFromFileName("hello.msl"), SourceType::kUnknown); ASSERT_EQ(SourceTypeFromFileName("hello.glsl"), SourceType::kUnknown); } TEST_P(CompilerTest, CanCompile) { if (GetParam() == TargetPlatform::kSkSL) { GTEST_SKIP() << "Not supported with SkSL"; } ASSERT_TRUE(CanCompileAndReflect("sample.vert")); ASSERT_TRUE(CanCompileAndReflect("sample.vert", SourceType::kVertexShader)); ASSERT_TRUE(CanCompileAndReflect("sample.vert", SourceType::kVertexShader, SourceLanguage::kGLSL)); } TEST_P(CompilerTest, CanCompileHLSL) { if (GetParam() == TargetPlatform::kSkSL) { GTEST_SKIP() << "Not supported with SkSL"; } ASSERT_TRUE(CanCompileAndReflect( "simple.vert.hlsl", SourceType::kVertexShader, SourceLanguage::kHLSL)); } TEST_P(CompilerTest, CanCompileHLSLWithMultipleStages) { if (GetParam() == TargetPlatform::kSkSL) { GTEST_SKIP() << "Not supported with SkSL"; } ASSERT_TRUE(CanCompileAndReflect("multiple_stages.hlsl", SourceType::kVertexShader, SourceLanguage::kHLSL, "VertexShader")); ASSERT_TRUE(CanCompileAndReflect("multiple_stages.hlsl", SourceType::kFragmentShader, SourceLanguage::kHLSL, "FragmentShader")); } TEST_P(CompilerTest, CanCompileComputeShader) { if (!TargetPlatformIsMetal(GetParam())) { GTEST_SKIP_("Only enabled on Metal backends till ES 3.2 support is added."); } ASSERT_TRUE(CanCompileAndReflect("sample.comp")); ASSERT_TRUE(CanCompileAndReflect("sample.comp", SourceType::kComputeShader)); } TEST_P(CompilerTest, MustFailDueToExceedingResourcesLimit) { if (GetParam() == TargetPlatform::kSkSL) { GTEST_SKIP() << "Not supported with SkSL"; } ScopedValidationDisable disable_validation; ASSERT_FALSE( CanCompileAndReflect("resources_limit.vert", SourceType::kVertexShader)); } TEST_P(CompilerTest, MustFailDueToMultipleLocationPerStructMember) { if (GetParam() == TargetPlatform::kSkSL) { GTEST_SKIP() << "Not supported with SkSL"; } ScopedValidationDisable disable_validation; ASSERT_FALSE(CanCompileAndReflect("struct_def_bug.vert")); } TEST_P(CompilerTest, BindingBaseForFragShader) { if (!TargetPlatformIsVulkan(GetParam())) { GTEST_SKIP(); } ASSERT_TRUE(CanCompileAndReflect("sample.vert", SourceType::kVertexShader)); ASSERT_TRUE(CanCompileAndReflect("sample.frag", SourceType::kFragmentShader)); auto get_binding = [&](const char* fixture) -> uint32_t { auto json_fd = GetReflectionJson(fixture); nlohmann::json shader_json = nlohmann::json::parse(json_fd->GetMapping()); return shader_json["buffers"][0]["binding"].get<uint32_t>(); }; auto vert_uniform_binding = get_binding("sample.vert"); auto frag_uniform_binding = get_binding("sample.frag"); ASSERT_GT(frag_uniform_binding, vert_uniform_binding); } TEST_P(CompilerTest, UniformsHaveBindingAndSet) { if (GetParam() == TargetPlatform::kSkSL) { GTEST_SKIP() << 
"Not supported with SkSL"; } ASSERT_TRUE(CanCompileAndReflect("sample_with_binding.vert", SourceType::kVertexShader)); ASSERT_TRUE(CanCompileAndReflect("sample.frag", SourceType::kFragmentShader)); struct binding_and_set { uint32_t binding; uint32_t set; }; auto get_binding = [&](const char* fixture) -> binding_and_set { auto json_fd = GetReflectionJson(fixture); nlohmann::json shader_json = nlohmann::json::parse(json_fd->GetMapping()); uint32_t binding = shader_json["buffers"][0]["binding"].get<uint32_t>(); uint32_t set = shader_json["buffers"][0]["set"].get<uint32_t>(); return {binding, set}; }; auto vert_uniform_binding = get_binding("sample_with_binding.vert"); auto frag_uniform_binding = get_binding("sample.frag"); ASSERT_EQ(frag_uniform_binding.set, 0u); ASSERT_EQ(vert_uniform_binding.set, 3u); ASSERT_EQ(vert_uniform_binding.binding, 17u); } TEST_P(CompilerTest, SkSLTextureLookUpOrderOfOperations) { if (GetParam() != TargetPlatform::kSkSL) { GTEST_SKIP() << "Only supported on SkSL"; } ASSERT_TRUE( CanCompileAndReflect("texture_lookup.frag", SourceType::kFragmentShader)); auto shader = GetShaderFile("texture_lookup.frag", GetParam()); std::string_view shader_mapping( reinterpret_cast<const char*>(shader->GetMapping()), shader->GetSize()); constexpr std::string_view expected = "textureA.eval(textureA_size * ( vec2(1.0) + flutter_FragCoord.xy));"; EXPECT_NE(shader_mapping.find(expected), std::string::npos); } TEST_P(CompilerTest, CanCompileStructs) { if (GetParam() != TargetPlatform::kSkSL) { GTEST_SKIP() << "Only supported on SkSL"; } ASSERT_TRUE(CanCompileAndReflect("struct_internal.frag", SourceType::kFragmentShader)); } #define INSTANTIATE_TARGET_PLATFORM_TEST_SUITE_P(suite_name) \ INSTANTIATE_TEST_SUITE_P( \ suite_name, CompilerTest, \ ::testing::Values(TargetPlatform::kOpenGLES, \ TargetPlatform::kOpenGLDesktop, \ TargetPlatform::kMetalDesktop, \ TargetPlatform::kMetalIOS, TargetPlatform::kSkSL), \ [](const ::testing::TestParamInfo<CompilerTest::ParamType>& info) { \ return TargetPlatformToString(info.param); \ }); INSTANTIATE_TARGET_PLATFORM_TEST_SUITE_P(CompilerSuite); } // namespace testing } // namespace compiler } // namespace impeller
engine/impeller/compiler/compiler_unittests.cc/0
{ "file_path": "engine/impeller/compiler/compiler_unittests.cc", "repo_id": "engine", "token_count": 2752 }
174
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "gtest/gtest.h" #include "impeller/compiler/shader_bundle.h" #include "flutter/testing/testing.h" #include "impeller/compiler/source_options.h" #include "impeller/compiler/types.h" #include "impeller/shader_bundle/shader_bundle_flatbuffers.h" namespace impeller { namespace compiler { namespace testing { const std::string kUnlitFragmentBundleConfig = "\"UnlitFragment\": {\"type\": \"fragment\", \"file\": " "\"shaders/flutter_gpu_unlit.frag\"}"; const std::string kUnlitVertexBundleConfig = "\"UnlitVertex\": {\"type\": \"vertex\", \"file\": " "\"shaders/flutter_gpu_unlit.vert\"}"; TEST(ShaderBundleTest, ParseShaderBundleConfigFailsForInvalidJSON) { std::string bundle = ""; std::stringstream error; auto result = ParseShaderBundleConfig(bundle, error); ASSERT_FALSE(result.has_value()); ASSERT_STREQ(error.str().c_str(), "The shader bundle is not a valid JSON object.\n"); } TEST(ShaderBundleTest, ParseShaderBundleConfigFailsWhenEntryNotObject) { std::string bundle = "{\"UnlitVertex\": []}"; std::stringstream error; auto result = ParseShaderBundleConfig(bundle, error); ASSERT_FALSE(result.has_value()); ASSERT_STREQ( error.str().c_str(), "Invalid shader entry \"UnlitVertex\": Entry is not a JSON object.\n"); } TEST(ShaderBundleTest, ParseShaderBundleConfigFailsWhenMissingFile) { std::string bundle = "{\"UnlitVertex\": {\"type\": \"vertex\"}}"; std::stringstream error; auto result = ParseShaderBundleConfig(bundle, error); ASSERT_FALSE(result.has_value()); ASSERT_STREQ(error.str().c_str(), "Invalid shader entry \"UnlitVertex\": Missing required " "\"file\" field.\n"); } TEST(ShaderBundleTest, ParseShaderBundleConfigFailsWhenMissingType) { std::string bundle = "{\"UnlitVertex\": {\"file\": \"shaders/flutter_gpu_unlit.vert\"}}"; std::stringstream error; auto result = ParseShaderBundleConfig(bundle, error); ASSERT_FALSE(result.has_value()); ASSERT_STREQ(error.str().c_str(), "Invalid shader entry \"UnlitVertex\": Missing required " "\"type\" field.\n"); } TEST(ShaderBundleTest, ParseShaderBundleConfigFailsForInvalidType) { std::string bundle = "{\"UnlitVertex\": {\"type\": \"invalid\", \"file\": " "\"shaders/flutter_gpu_unlit.vert\"}}"; std::stringstream error; auto result = ParseShaderBundleConfig(bundle, error); ASSERT_FALSE(result.has_value()); ASSERT_STREQ(error.str().c_str(), "Invalid shader entry \"UnlitVertex\": Shader type " "\"invalid\" is unknown.\n"); } TEST(ShaderBundleTest, ParseShaderBundleConfigFailsForInvalidLanguage) { std::string bundle = "{\"UnlitVertex\": {\"type\": \"vertex\", \"language\": \"invalid\", " "\"file\": \"shaders/flutter_gpu_unlit.vert\"}}"; std::stringstream error; auto result = ParseShaderBundleConfig(bundle, error); ASSERT_FALSE(result.has_value()); ASSERT_STREQ(error.str().c_str(), "Invalid shader entry \"UnlitVertex\": Unknown language type " "\"invalid\".\n"); } TEST(ShaderBundleTest, ParseShaderBundleConfigReturnsExpectedConfig) { std::string bundle = "{" + kUnlitVertexBundleConfig + ", " + kUnlitFragmentBundleConfig + "}"; std::stringstream error; auto result = ParseShaderBundleConfig(bundle, error); ASSERT_TRUE(result.has_value()); ASSERT_STREQ(error.str().c_str(), ""); // NOLINTBEGIN(bugprone-unchecked-optional-access) auto maybe_vertex = result->find("UnlitVertex"); auto maybe_fragment = result->find("UnlitFragment"); ASSERT_TRUE(maybe_vertex != result->end()); ASSERT_TRUE(maybe_fragment != 
result->end()); auto vertex = maybe_vertex->second; auto fragment = maybe_fragment->second; // NOLINTEND(bugprone-unchecked-optional-access) EXPECT_EQ(vertex.type, SourceType::kVertexShader); EXPECT_EQ(vertex.language, SourceLanguage::kGLSL); EXPECT_STREQ(vertex.entry_point.c_str(), "main"); EXPECT_STREQ(vertex.source_file_name.c_str(), "shaders/flutter_gpu_unlit.vert"); EXPECT_EQ(fragment.type, SourceType::kFragmentShader); EXPECT_EQ(fragment.language, SourceLanguage::kGLSL); EXPECT_STREQ(fragment.entry_point.c_str(), "main"); EXPECT_STREQ(fragment.source_file_name.c_str(), "shaders/flutter_gpu_unlit.frag"); } template <typename T> const T* FindByName(const std::vector<std::unique_ptr<T>>& collection, const std::string& name) { const auto maybe = std::find_if( collection.begin(), collection.end(), [&name](const std::unique_ptr<T>& value) { return value->name == name; }); if (maybe == collection.end()) { return nullptr; } return maybe->get(); } TEST(ShaderBundleTest, GenerateShaderBundleFlatbufferProducesCorrectResult) { std::string fixtures_path = flutter::testing::GetFixturesPath(); std::string config = "{\"UnlitFragment\": {\"type\": \"fragment\", \"file\": \"" + fixtures_path + "/flutter_gpu_unlit.frag\"}, \"UnlitVertex\": {\"type\": " "\"vertex\", \"file\": \"" + fixtures_path + "/flutter_gpu_unlit.vert\"}}"; SourceOptions options; options.target_platform = TargetPlatform::kRuntimeStageMetal; options.source_language = SourceLanguage::kGLSL; std::optional<fb::shaderbundle::ShaderBundleT> bundle = GenerateShaderBundleFlatbuffer(config, options); ASSERT_TRUE(bundle.has_value()); // NOLINTNEXTLINE(bugprone-unchecked-optional-access) const auto& shaders = bundle->shaders; const auto* vertex = FindByName(shaders, "UnlitVertex"); const auto* fragment = FindByName(shaders, "UnlitFragment"); ASSERT_NE(vertex, nullptr); ASSERT_NE(fragment, nullptr); // -------------------------------------------------------------------------- /// Verify vertex shader. /// EXPECT_STREQ(vertex->metal_desktop->entrypoint.c_str(), "flutter_gpu_unlit_vertex_main"); EXPECT_EQ(vertex->metal_desktop->stage, fb::shaderbundle::ShaderStage::kVertex); // Inputs. ASSERT_EQ(vertex->metal_desktop->inputs.size(), 1u); const auto& v_in_position = vertex->metal_desktop->inputs[0]; EXPECT_STREQ(v_in_position->name.c_str(), "position"); EXPECT_EQ(v_in_position->location, 0u); EXPECT_EQ(v_in_position->set, 0u); EXPECT_EQ(v_in_position->binding, 0u); EXPECT_EQ(v_in_position->type, fb::shaderbundle::InputDataType::kFloat); EXPECT_EQ(v_in_position->bit_width, 32u); EXPECT_EQ(v_in_position->vec_size, 2u); EXPECT_EQ(v_in_position->columns, 1u); EXPECT_EQ(v_in_position->offset, 0u); // Uniforms. 
ASSERT_EQ(vertex->metal_desktop->uniform_structs.size(), 1u); const auto* vert_info = FindByName(vertex->metal_desktop->uniform_structs, "VertInfo"); ASSERT_NE(vert_info, nullptr); EXPECT_EQ(vert_info->ext_res_0, 0u); EXPECT_EQ(vert_info->set, 0u); EXPECT_EQ(vert_info->binding, 0u); ASSERT_EQ(vert_info->fields.size(), 2u); const auto& mvp = vert_info->fields[0]; EXPECT_STREQ(mvp->name.c_str(), "mvp"); EXPECT_EQ(mvp->type, fb::shaderbundle::UniformDataType::kFloat); EXPECT_EQ(mvp->offset_in_bytes, 0u); EXPECT_EQ(mvp->element_size_in_bytes, 64u); EXPECT_EQ(mvp->total_size_in_bytes, 64u); EXPECT_EQ(mvp->array_elements, 0u); const auto& color = vert_info->fields[1]; EXPECT_STREQ(color->name.c_str(), "color"); EXPECT_EQ(color->type, fb::shaderbundle::UniformDataType::kFloat); EXPECT_EQ(color->offset_in_bytes, 64u); EXPECT_EQ(color->element_size_in_bytes, 16u); EXPECT_EQ(color->total_size_in_bytes, 16u); EXPECT_EQ(color->array_elements, 0u); // -------------------------------------------------------------------------- /// Verify fragment shader. /// EXPECT_STREQ(fragment->metal_desktop->entrypoint.c_str(), "flutter_gpu_unlit_fragment_main"); EXPECT_EQ(fragment->metal_desktop->stage, fb::shaderbundle::ShaderStage::kFragment); // Inputs (not recorded for fragment shaders). ASSERT_EQ(fragment->metal_desktop->inputs.size(), 0u); // Uniforms. ASSERT_EQ(fragment->metal_desktop->inputs.size(), 0u); } } // namespace testing } // namespace compiler } // namespace impeller
engine/impeller/compiler/shader_bundle_unittests.cc/0
{ "file_path": "engine/impeller/compiler/shader_bundle_unittests.cc", "repo_id": "engine", "token_count": 3272 }
175
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef TEXTURE_GLSL_ #define TEXTURE_GLSL_ #include <impeller/branching.glsl> #include <impeller/conversions.glsl> #include <impeller/tile_mode.glsl> #include <impeller/types.glsl> /// Sample from a texture. /// /// If `y_coord_scale` < 0.0, the Y coordinate is flipped. This is useful /// for Impeller graphics backends that use a flipped framebuffer coordinate /// space. /// /// A negative mip bias is applied to improve the sharpness of scaled down /// images when mip sampling is enabled. See `kDefaultMipBias` for more detail. vec4 IPSample(sampler2D texture_sampler, vec2 coords, float y_coord_scale) { return texture(texture_sampler, IPRemapCoords(coords, y_coord_scale), kDefaultMipBias); } /// Sample from a texture. /// /// If `y_coord_scale` < 0.0, the Y coordinate is flipped. This is useful /// for Impeller graphics backends that use a flipped framebuffer coordinate /// space. /// The range of `coords` will be mapped from [0, 1] to [half_texel, 1 - /// half_texel] vec4 IPSampleLinear(sampler2D texture_sampler, vec2 coords, float y_coord_scale, vec2 half_texel) { coords.x = mix(half_texel.x, 1 - half_texel.x, coords.x); coords.y = mix(half_texel.y, 1 - half_texel.y, coords.y); return IPSample(texture_sampler, coords, y_coord_scale); } /// Remap a float using a tiling mode. /// /// When `tile_mode` is `kTileModeDecal`, no tiling is applied and `t` is /// returned. In all other cases, a value between 0 and 1 is returned by tiling /// `t`. /// When `t` is between [0 to 1), the original unchanged `t` is always returned. float IPFloatTile(float t, float tile_mode) { if (tile_mode == kTileModeClamp) { t = clamp(t, 0.0, 1.0); } else if (tile_mode == kTileModeRepeat) { t = fract(t); } else if (tile_mode == kTileModeMirror) { float t1 = t - 1; float t2 = t1 - 2 * floor(t1 * 0.5) - 1; t = abs(t2); } return t; } /// Remap a vec2 using a tiling mode. /// /// Runs each component of the vec2 through `IPFloatTile`. vec2 IPVec2Tile(vec2 coords, float x_tile_mode, float y_tile_mode) { return vec2(IPFloatTile(coords.x, x_tile_mode), IPFloatTile(coords.y, y_tile_mode)); } /// Sample a texture, emulating a specific tile mode. /// /// This is useful for Impeller graphics backend that don't have native support /// for Decal. vec4 IPSampleWithTileMode(sampler2D tex, vec2 coords, float x_tile_mode, float y_tile_mode) { if (x_tile_mode == kTileModeDecal && (coords.x < 0 || coords.x >= 1) || y_tile_mode == kTileModeDecal && (coords.y < 0 || coords.y >= 1)) { return vec4(0); } return texture(tex, coords, kDefaultMipBias); } const float16_t kTileModeDecalHf = 3.0hf; /// Sample a texture, emulating a specific tile mode. /// /// This is useful for Impeller graphics backend that don't have native support /// for Decal. f16vec4 IPHalfSampleWithTileMode(f16sampler2D tex, vec2 coords, float16_t x_tile_mode, float16_t y_tile_mode) { if (x_tile_mode == kTileModeDecalHf && (coords.x < 0.0 || coords.x >= 1.0) || y_tile_mode == kTileModeDecalHf && (coords.y < 0.0 || coords.y >= 1.0)) { return f16vec4(0.0hf); } return texture(tex, coords, kDefaultMipBiasHalf); } /// Sample a texture, emulating a specific tile mode. /// /// This is useful for Impeller graphics backend that don't have native support /// for Decal. 
/// The range of `coords` will be mapped from [0, 1] to [half_texel, 1 - /// half_texel] vec4 IPSampleLinearWithTileMode(sampler2D tex, vec2 coords, float y_coord_scale, vec2 half_texel, float x_tile_mode, float y_tile_mode, vec4 decal_border_color) { if (x_tile_mode == kTileModeDecal && (coords.x < 0 || coords.x >= 1) || y_tile_mode == kTileModeDecal && (coords.y < 0 || coords.y >= 1)) { return decal_border_color; } return IPSampleLinear(tex, IPVec2Tile(coords, x_tile_mode, y_tile_mode), y_coord_scale, half_texel); } /// Sample a texture with decal tile mode. vec4 IPSampleDecal(sampler2D texture_sampler, vec2 coords) { if (any(lessThan(coords, vec2(0))) || any(greaterThanEqual(coords, vec2(1)))) { return vec4(0); } return texture(texture_sampler, coords, kDefaultMipBias); } /// Sample a texture with decal tile mode. f16vec4 IPHalfSampleDecal(f16sampler2D texture_sampler, vec2 coords) { if (any(lessThan(coords, vec2(0))) || any(greaterThanEqual(coords, vec2(1)))) { return f16vec4(0.0); } return texture(texture_sampler, coords, kDefaultMipBiasHalf); } /// Sample a texture, emulating a specific tile mode. /// /// This is useful for Impeller graphics backend that don't have native support /// for Decal. /// The range of `coords` will be mapped from [0, 1] to [half_texel, 1 - /// half_texel] vec4 IPSampleLinearWithTileMode(sampler2D tex, vec2 coords, float y_coord_scale, vec2 half_texel, float tile_mode, vec4 decal_border_color) { return IPSampleLinearWithTileMode(tex, coords, y_coord_scale, half_texel, tile_mode, tile_mode, decal_border_color); } #endif
engine/impeller/compiler/shader_lib/impeller/texture.glsl/0
{ "file_path": "engine/impeller/compiler/shader_lib/impeller/texture.glsl", "repo_id": "engine", "token_count": 2565 }
176
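The tiling remap in `IPFloatTile` is easiest to sanity-check on the CPU. Below is a C++ restatement of the same clamp/repeat/mirror algebra; the `TileMode` enum is a stand-in for the float constants defined in `tile_mode.glsl`:

```cpp
#include <cmath>

// Stand-in for the kTileMode* float constants in tile_mode.glsl.
enum class TileMode { kClamp, kRepeat, kMirror, kDecal };

float FloatTile(float t, TileMode mode) {
  switch (mode) {
    case TileMode::kClamp:
      return t < 0.0f ? 0.0f : (t > 1.0f ? 1.0f : t);
    case TileMode::kRepeat:
      return t - std::floor(t);  // fract(t)
    case TileMode::kMirror: {
      // Same algebra as IPFloatTile: reflect about the integer lattice so the
      // result ping-pongs between 0 and 1 (e.g. 1.25 -> 0.75, -0.25 -> 0.25).
      const float t1 = t - 1.0f;
      const float t2 = t1 - 2.0f * std::floor(t1 * 0.5f) - 1.0f;
      return std::fabs(t2);
    }
    case TileMode::kDecal:
      return t;  // Decal leaves t untouched; the sampling code handles it.
  }
  return t;
}
```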
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_COMPILER_UNIFORM_SORTER_H_ #define FLUTTER_IMPELLER_COMPILER_UNIFORM_SORTER_H_ #include <optional> #include "impeller/compiler/compiler_backend.h" #include "spirv_msl.hpp" #include "spirv_parser.hpp" namespace impeller { /// @brief Sorts uniform declarations in an IR according to decoration order. /// /// The [type_filter] may be optionally supplied to limit which types are /// returned. The [include] value can be set to false to change this filter to /// exclude instead of include. std::vector<spirv_cross::ID> SortUniforms( const spirv_cross::ParsedIR* ir, const spirv_cross::Compiler* compiler, std::optional<spirv_cross::SPIRType::BaseType> type_filter = std::nullopt, bool include = true); } // namespace impeller #endif // FLUTTER_IMPELLER_COMPILER_UNIFORM_SORTER_H_
engine/impeller/compiler/uniform_sorter.h/0
{ "file_path": "engine/impeller/compiler/uniform_sorter.h", "repo_id": "engine", "token_count": 347 }
177
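`SortUniforms` operates on spirv_cross IR, which is heavyweight to reproduce here; as a simplified stand-in, sorting resource IDs by a single decoration (the binding) looks like the sketch below. `UniformInfo` and `SortByBinding` are invented for illustration and are not part of the compiler:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Invented stand-in for the (id, decoration) pairs the real implementation
// reads out of the spirv_cross IR.
struct UniformInfo {
  uint32_t id;       // Plays the role of spirv_cross::ID.
  uint32_t binding;  // Decoration used as the sort key.
};

std::vector<uint32_t> SortByBinding(std::vector<UniformInfo> uniforms) {
  std::sort(uniforms.begin(), uniforms.end(),
            [](const UniformInfo& a, const UniformInfo& b) {
              return a.binding < b.binding;
            });
  std::vector<uint32_t> ids;
  ids.reserve(uniforms.size());
  for (const auto& uniform : uniforms) {
    ids.push_back(uniform.id);
  }
  return ids;
}
```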
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_CORE_FORMATS_H_ #define FLUTTER_IMPELLER_CORE_FORMATS_H_ #include <cstdint> #include <functional> #include <memory> #include <string> #include <type_traits> #include "flutter/fml/hash_combine.h" #include "flutter/fml/logging.h" #include "impeller/base/mask.h" #include "impeller/geometry/color.h" #include "impeller/geometry/rect.h" #include "impeller/geometry/scalar.h" namespace impeller { enum class WindingOrder { kClockwise, kCounterClockwise, }; class Texture; //------------------------------------------------------------------------------ /// @brief Specified where the allocation resides and how it is used. /// enum class StorageMode { //---------------------------------------------------------------------------- /// Allocations can be mapped onto the hosts address space and also be used by /// the device. /// kHostVisible, //---------------------------------------------------------------------------- /// Allocations can only be used by the device. This location is optimal for /// use by the device. If the host needs to access these allocations, the /// transfer queue must be used to transfer this allocation onto the a host /// visible buffer. /// kDevicePrivate, //---------------------------------------------------------------------------- /// Used by the device for temporary render targets. These allocations cannot /// be transferred from and to other allocations using the transfer queue. /// Render pass cannot initialize the contents of these buffers using load and /// store actions. /// /// These allocations reside in tile memory which has higher bandwidth, lower /// latency and lower power consumption. The total device memory usage is /// also lower as a separate allocation does not need to be created in /// device memory. Prefer using these allocations for intermediates like depth /// and stencil buffers. /// kDeviceTransient, }; constexpr const char* StorageModeToString(StorageMode mode) { switch (mode) { case StorageMode::kHostVisible: return "HostVisible"; case StorageMode::kDevicePrivate: return "DevicePrivate"; case StorageMode::kDeviceTransient: return "DeviceTransient"; } FML_UNREACHABLE(); } //------------------------------------------------------------------------------ /// @brief The Pixel formats supported by Impeller. The naming convention /// denotes the usage of the component, the bit width of that /// component, and then one or more qualifiers to its /// interpretation. /// /// For instance, `kR8G8B8A8UNormIntSRGB` is a 32 bits-per-pixel /// format ordered in RGBA with 8 bits per component with each /// component expressed as an unsigned normalized integer and a /// conversion from sRGB to linear color space. /// /// Key: /// R -> Red Component /// G -> Green Component /// B -> Blue Component /// D -> Depth Component /// S -> Stencil Component /// U -> Unsigned (Lack of this denotes a signed component) /// Norm -> Normalized /// SRGB -> sRGB to linear interpretation /// /// While the effective bit width of the pixel can be determined by /// adding up the widths of each component, only the non-esoteric /// formats are tightly packed. Do not assume tight packing for the /// esoteric formats and use blit passes to convert to a /// non-esoteric pass. 
/// enum class PixelFormat : uint8_t { kUnknown, kA8UNormInt, kR8UNormInt, kR8G8UNormInt, kR8G8B8A8UNormInt, kR8G8B8A8UNormIntSRGB, kB8G8R8A8UNormInt, kB8G8R8A8UNormIntSRGB, kR32G32B32A32Float, kR16G16B16A16Float, kB10G10R10XR, kB10G10R10XRSRGB, kB10G10R10A10XR, // Depth and stencil formats. kS8UInt, kD24UnormS8Uint, kD32FloatS8UInt, }; constexpr bool IsDepthWritable(PixelFormat format) { switch (format) { case PixelFormat::kD24UnormS8Uint: case PixelFormat::kD32FloatS8UInt: return true; default: return false; } } constexpr bool IsStencilWritable(PixelFormat format) { switch (format) { case PixelFormat::kS8UInt: case PixelFormat::kD24UnormS8Uint: case PixelFormat::kD32FloatS8UInt: return true; default: return false; } } constexpr const char* PixelFormatToString(PixelFormat format) { switch (format) { case PixelFormat::kUnknown: return "Unknown"; case PixelFormat::kA8UNormInt: return "A8UNormInt"; case PixelFormat::kR8UNormInt: return "R8UNormInt"; case PixelFormat::kR8G8UNormInt: return "R8G8UNormInt"; case PixelFormat::kR8G8B8A8UNormInt: return "R8G8B8A8UNormInt"; case PixelFormat::kR8G8B8A8UNormIntSRGB: return "R8G8B8A8UNormIntSRGB"; case PixelFormat::kB8G8R8A8UNormInt: return "B8G8R8A8UNormInt"; case PixelFormat::kB8G8R8A8UNormIntSRGB: return "B8G8R8A8UNormIntSRGB"; case PixelFormat::kR32G32B32A32Float: return "R32G32B32A32Float"; case PixelFormat::kR16G16B16A16Float: return "R16G16B16A16Float"; case PixelFormat::kB10G10R10XR: return "B10G10R10XR"; case PixelFormat::kB10G10R10XRSRGB: return "B10G10R10XRSRGB"; case PixelFormat::kB10G10R10A10XR: return "B10G10R10A10XR"; case PixelFormat::kS8UInt: return "S8UInt"; case PixelFormat::kD24UnormS8Uint: return "D24UnormS8Uint"; case PixelFormat::kD32FloatS8UInt: return "D32FloatS8UInt"; } FML_UNREACHABLE(); } enum class BlendFactor { kZero, kOne, kSourceColor, kOneMinusSourceColor, kSourceAlpha, kOneMinusSourceAlpha, kDestinationColor, kOneMinusDestinationColor, kDestinationAlpha, kOneMinusDestinationAlpha, kSourceAlphaSaturated, kBlendColor, kOneMinusBlendColor, kBlendAlpha, kOneMinusBlendAlpha, }; enum class BlendOperation { kAdd, kSubtract, kReverseSubtract, }; enum class LoadAction { kDontCare, kLoad, kClear, }; enum class StoreAction { kDontCare, kStore, kMultisampleResolve, kStoreAndMultisampleResolve, }; constexpr const char* LoadActionToString(LoadAction action) { switch (action) { case LoadAction::kDontCare: return "DontCare"; case LoadAction::kLoad: return "Load"; case LoadAction::kClear: return "Clear"; } } constexpr const char* StoreActionToString(StoreAction action) { switch (action) { case StoreAction::kDontCare: return "DontCare"; case StoreAction::kStore: return "Store"; case StoreAction::kMultisampleResolve: return "MultisampleResolve"; case StoreAction::kStoreAndMultisampleResolve: return "StoreAndMultisampleResolve"; } } constexpr bool CanClearAttachment(LoadAction action) { switch (action) { case LoadAction::kLoad: return false; case LoadAction::kDontCare: case LoadAction::kClear: return true; } FML_UNREACHABLE(); } constexpr bool CanDiscardAttachmentWhenDone(StoreAction action) { switch (action) { case StoreAction::kStore: case StoreAction::kStoreAndMultisampleResolve: return false; case StoreAction::kDontCare: case StoreAction::kMultisampleResolve: return true; } FML_UNREACHABLE(); } enum class TextureType { kTexture2D, kTexture2DMultisample, kTextureCube, kTextureExternalOES, }; constexpr const char* TextureTypeToString(TextureType type) { switch (type) { case TextureType::kTexture2D: return "Texture2D"; case 
TextureType::kTexture2DMultisample: return "Texture2DMultisample"; case TextureType::kTextureCube: return "TextureCube"; case TextureType::kTextureExternalOES: return "TextureExternalOES"; } FML_UNREACHABLE(); } constexpr bool IsMultisampleCapable(TextureType type) { switch (type) { case TextureType::kTexture2D: case TextureType::kTextureCube: case TextureType::kTextureExternalOES: return false; case TextureType::kTexture2DMultisample: return true; } return false; } enum class SampleCount : uint8_t { kCount1 = 1, kCount4 = 4, }; enum class TextureUsage { kUnknown = 0, kShaderRead = 1 << 0, kShaderWrite = 1 << 1, kRenderTarget = 1 << 2, }; IMPELLER_ENUM_IS_MASK(TextureUsage); using TextureUsageMask = Mask<TextureUsage>; constexpr const char* TextureUsageToString(TextureUsage usage) { switch (usage) { case TextureUsage::kUnknown: return "Unknown"; case TextureUsage::kShaderRead: return "ShaderRead"; case TextureUsage::kShaderWrite: return "ShaderWrite"; case TextureUsage::kRenderTarget: return "RenderTarget"; } FML_UNREACHABLE(); } std::string TextureUsageMaskToString(TextureUsageMask mask); // Texture coordinate system. enum class TextureCoordinateSystem { // Alternative coordinate system used when uploading texture data from the // host. // (0, 0) is the bottom-left of the image with +Y going up. kUploadFromHost, // Default coordinate system. // (0, 0) is the top-left of the image with +Y going down. kRenderToTexture, }; enum class CullMode { kNone, kFrontFace, kBackFace, }; enum class IndexType { kUnknown, k16bit, k32bit, /// Does not use the index buffer. kNone, }; /// Decides how backend draws pixels based on input vertices. enum class PrimitiveType : uint8_t { /// Draws a triangle for each separate set of three vertices. /// /// Vertices [A, B, C, D, E, F] will produce triangles /// [ABC, DEF]. kTriangle, /// Draws a triangle for every adjacent three vertices. /// /// Vertices [A, B, C, D, E, F] will produce triangles /// [ABC, BCD, CDE, DEF]. kTriangleStrip, /// Draws a line for each separate set of two vertices. /// /// Vertices [A, B, C, D] will produce disjoint lines /// [AB, CD]. kLine, /// Draws a continuous line that connects all input vertices. /// /// Vertices [A, B, C] will produce one continuous line /// [ABC]. kLineStrip, /// Draws a point at each input vertex. kPoint, // Triangle fans are implementation dependent and need extra extensions // checks. Hence, they are not supported here. }; enum class PolygonMode { kFill, kLine, }; struct DepthRange { Scalar z_near = 0.0; Scalar z_far = 1.0; constexpr bool operator==(const DepthRange& other) const { return z_near == other.z_near && z_far == other.z_far; } }; struct Viewport { Rect rect; DepthRange depth_range; constexpr bool operator==(const Viewport& other) const { return rect == other.rect && depth_range == other.depth_range; } }; enum class MinMagFilter { /// Select nearest to the sample point. Most widely supported. kNearest, /// Select two points and linearly interpolate between them. Some formats /// may not support this. kLinear, }; enum class MipFilter { /// Sample from the nearest mip level. kNearest, /// Sample from the two nearest mip levels and linearly interpolate between /// them. kLinear, }; enum class SamplerAddressMode { kClampToEdge, kRepeat, kMirror, // More modes are almost always supported but they are usually behind // extensions checks. The ones currently in these structs are safe (always // supported) defaults.
/// @brief decal sampling mode is only supported on devices that pass /// the `Capabilities.SupportsDecalSamplerAddressMode` check. kDecal, }; enum class ColorWriteMaskBits : uint64_t { kNone = 0, kRed = 1 << 0, kGreen = 1 << 1, kBlue = 1 << 2, kAlpha = 1 << 3, kAll = kRed | kGreen | kBlue | kAlpha, }; IMPELLER_ENUM_IS_MASK(ColorWriteMaskBits); using ColorWriteMask = Mask<ColorWriteMaskBits>; constexpr size_t BytesPerPixelForPixelFormat(PixelFormat format) { switch (format) { case PixelFormat::kUnknown: return 0u; case PixelFormat::kA8UNormInt: case PixelFormat::kR8UNormInt: case PixelFormat::kS8UInt: return 1u; case PixelFormat::kR8G8UNormInt: return 2u; case PixelFormat::kR8G8B8A8UNormInt: case PixelFormat::kR8G8B8A8UNormIntSRGB: case PixelFormat::kB8G8R8A8UNormInt: case PixelFormat::kB8G8R8A8UNormIntSRGB: case PixelFormat::kB10G10R10XRSRGB: case PixelFormat::kB10G10R10XR: return 4u; case PixelFormat::kD24UnormS8Uint: return 4u; case PixelFormat::kD32FloatS8UInt: return 5u; case PixelFormat::kR16G16B16A16Float: case PixelFormat::kB10G10R10A10XR: return 8u; case PixelFormat::kR32G32B32A32Float: return 16u; } return 0u; } //------------------------------------------------------------------------------ /// @brief Describe the color attachment that will be used with this /// pipeline. /// /// Blending at specific color attachments follows the pseudo-code: /// ``` /// if (blending_enabled) { /// final_color.rgb = (src_color_blend_factor * new_color.rgb) /// <color_blend_op> /// (dst_color_blend_factor * old_color.rgb); /// final_color.a = (src_alpha_blend_factor * new_color.a) /// <alpha_blend_op> /// (dst_alpha_blend_factor * old_color.a); /// } else { /// final_color = new_color; /// } /// // IMPORTANT: The write mask is applied irrespective of whether /// // blending_enabled is set. /// final_color = final_color & write_mask; /// ``` /// /// The default blend mode is 1 - source alpha. struct ColorAttachmentDescriptor { PixelFormat format = PixelFormat::kUnknown; bool blending_enabled = false; BlendFactor src_color_blend_factor = BlendFactor::kSourceAlpha; BlendOperation color_blend_op = BlendOperation::kAdd; BlendFactor dst_color_blend_factor = BlendFactor::kOneMinusSourceAlpha; BlendFactor src_alpha_blend_factor = BlendFactor::kSourceAlpha; BlendOperation alpha_blend_op = BlendOperation::kAdd; BlendFactor dst_alpha_blend_factor = BlendFactor::kOneMinusSourceAlpha; ColorWriteMask write_mask = ColorWriteMaskBits::kAll; constexpr bool operator==(const ColorAttachmentDescriptor& o) const { return format == o.format && // blending_enabled == o.blending_enabled && // src_color_blend_factor == o.src_color_blend_factor && // color_blend_op == o.color_blend_op && // dst_color_blend_factor == o.dst_color_blend_factor && // src_alpha_blend_factor == o.src_alpha_blend_factor && // alpha_blend_op == o.alpha_blend_op && // dst_alpha_blend_factor == o.dst_alpha_blend_factor && // write_mask == o.write_mask; } constexpr size_t Hash() const { return fml::HashCombine( format, blending_enabled, src_color_blend_factor, color_blend_op, dst_color_blend_factor, src_alpha_blend_factor, alpha_blend_op, dst_alpha_blend_factor, static_cast<uint64_t>(write_mask)); } }; enum class CompareFunction : uint8_t { /// Comparison test never passes. kNever, /// Comparison test passes always passes. kAlways, /// Comparison test passes if new_value < current_value. kLess, /// Comparison test passes if new_value == current_value. kEqual, /// Comparison test passes if new_value <= current_value. 
kLessEqual, /// Comparison test passes if new_value > current_value. kGreater, /// Comparison test passes if new_value != current_value. kNotEqual, /// Comparison test passes if new_value >= current_value. kGreaterEqual, }; enum class StencilOperation : uint8_t { /// Don't modify the current stencil value. kKeep, /// Reset the stencil value to zero. kZero, /// Reset the stencil value to the reference value. kSetToReferenceValue, /// Increment the current stencil value by 1. Clamp it to the maximum. kIncrementClamp, /// Decrement the current stencil value by 1. Clamp it to zero. kDecrementClamp, /// Perform a logical bitwise invert on the current stencil value. kInvert, /// Increment the current stencil value by 1. If at maximum, set to zero. kIncrementWrap, /// Decrement the current stencil value by 1. If at zero, set to maximum. kDecrementWrap, }; struct DepthAttachmentDescriptor { //---------------------------------------------------------------------------- /// Indicates how to compare the value with that in the depth buffer. /// CompareFunction depth_compare = CompareFunction::kAlways; //---------------------------------------------------------------------------- /// Indicates when writes must be performed to the depth buffer. /// bool depth_write_enabled = false; constexpr bool operator==(const DepthAttachmentDescriptor& o) const { return depth_compare == o.depth_compare && depth_write_enabled == o.depth_write_enabled; } constexpr size_t GetHash() const { return fml::HashCombine(depth_compare, depth_write_enabled); } }; struct StencilAttachmentDescriptor { //---------------------------------------------------------------------------- /// Indicates the operation to perform between the reference value and the /// value in the stencil buffer. Both values have the read_mask applied to /// them before performing this operation. /// CompareFunction stencil_compare = CompareFunction::kAlways; //---------------------------------------------------------------------------- /// Indicates what to do when the stencil test has failed. /// StencilOperation stencil_failure = StencilOperation::kKeep; //---------------------------------------------------------------------------- /// Indicates what to do when the stencil test passes but the depth test /// fails. /// StencilOperation depth_failure = StencilOperation::kKeep; //---------------------------------------------------------------------------- /// Indicates what to do when both the stencil and depth tests pass. /// StencilOperation depth_stencil_pass = StencilOperation::kKeep; //---------------------------------------------------------------------------- /// The mask applied to the reference and stencil buffer values before /// performing the stencil_compare operation. /// uint32_t read_mask = ~0; //---------------------------------------------------------------------------- /// The mask applied to the new stencil value before it is written into the /// stencil buffer. 
/// uint32_t write_mask = ~0; constexpr bool operator==(const StencilAttachmentDescriptor& o) const { return stencil_compare == o.stencil_compare && stencil_failure == o.stencil_failure && depth_failure == o.depth_failure && depth_stencil_pass == o.depth_stencil_pass && read_mask == o.read_mask && write_mask == o.write_mask; } constexpr size_t GetHash() const { return fml::HashCombine(stencil_compare, stencil_failure, depth_failure, depth_stencil_pass, read_mask, write_mask); } }; struct Attachment { std::shared_ptr<Texture> texture; std::shared_ptr<Texture> resolve_texture; LoadAction load_action = LoadAction::kDontCare; StoreAction store_action = StoreAction::kStore; bool IsValid() const; }; struct ColorAttachment : public Attachment { Color clear_color = Color::BlackTransparent(); }; struct DepthAttachment : public Attachment { double clear_depth = 0.0; }; struct StencilAttachment : public Attachment { uint32_t clear_stencil = 0; }; std::string AttachmentToString(const Attachment& attachment); std::string ColorAttachmentToString(const ColorAttachment& color); std::string DepthAttachmentToString(const DepthAttachment& depth); std::string StencilAttachmentToString(const StencilAttachment& stencil); } // namespace impeller namespace std { template <> struct hash<impeller::DepthAttachmentDescriptor> { constexpr std::size_t operator()( const impeller::DepthAttachmentDescriptor& des) const { return des.GetHash(); } }; template <> struct hash<impeller::StencilAttachmentDescriptor> { constexpr std::size_t operator()( const impeller::StencilAttachmentDescriptor& des) const { return des.GetHash(); } }; } // namespace std #endif // FLUTTER_IMPELLER_CORE_FORMATS_H_
engine/impeller/core/formats.h/0
{ "file_path": "engine/impeller/core/formats.h", "repo_id": "engine", "token_count": 7274 }
178
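The blending pseudo-code documented on `ColorAttachmentDescriptor` can be evaluated on the CPU for the default factors (source alpha / one-minus-source-alpha with additive ops). The `Rgba` struct below is a plain illustration type, not `impeller::Color`:

```cpp
struct Rgba {
  float r, g, b, a;
};

// Evaluates the documented blend equation for the default descriptor:
//   final = src_factor * new + dst_factor * old
// with src_factor = new.a (kSourceAlpha), dst_factor = 1 - new.a
// (kOneMinusSourceAlpha), and both blend ops set to kAdd.
Rgba BlendDefaultSourceOver(const Rgba& new_color, const Rgba& old_color) {
  const float sa = new_color.a;
  return Rgba{sa * new_color.r + (1.0f - sa) * old_color.r,
              sa * new_color.g + (1.0f - sa) * old_color.g,
              sa * new_color.b + (1.0f - sa) * old_color.b,
              sa * new_color.a + (1.0f - sa) * old_color.a};
}
// Example: 50% opaque red over opaque blue -> (0.5, 0.0, 0.5, 0.75).
```

Note that, per the doc comment above, the write mask would still be applied to this result whether or not blending is enabled.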
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_CORE_SHADER_TYPES_H_ #define FLUTTER_IMPELLER_CORE_SHADER_TYPES_H_ #include <cstddef> #include <cstdint> #include <optional> #include <string_view> #include <vector> #include "flutter/fml/hash_combine.h" #include "flutter/fml/logging.h" #include "impeller/core/runtime_types.h" #include "impeller/geometry/half.h" #include "impeller/geometry/matrix.h" namespace impeller { enum class ShaderStage { kUnknown, kVertex, kFragment, kCompute, }; constexpr ShaderStage ToShaderStage(RuntimeShaderStage stage) { switch (stage) { case RuntimeShaderStage::kVertex: return ShaderStage::kVertex; case RuntimeShaderStage::kFragment: return ShaderStage::kFragment; case RuntimeShaderStage::kCompute: return ShaderStage::kCompute; } FML_UNREACHABLE(); } enum class ShaderType { kUnknown, kVoid, kBoolean, kSignedByte, kUnsignedByte, kSignedShort, kUnsignedShort, kSignedInt, kUnsignedInt, kSignedInt64, kUnsignedInt64, kAtomicCounter, kHalfFloat, kFloat, kDouble, kStruct, kImage, kSampledImage, kSampler, }; struct ShaderStructMemberMetadata { ShaderType type; std::string name; size_t offset; size_t size; size_t byte_length; std::optional<size_t> array_elements; }; struct ShaderMetadata { // This must match the uniform name in the shader program. std::string name; std::vector<ShaderStructMemberMetadata> members; }; /// @brief Metadata required to bind a buffer. /// /// OpenGL binding requires the usage of the separate shader metadata struct. struct ShaderUniformSlot { /// @brief The name of the uniform slot. const char* name; /// @brief `ext_res_0` is the Metal binding value. size_t ext_res_0; /// @brief The Vulkan descriptor set index. size_t set; /// @brief The Vulkan binding value. size_t binding; }; /// @brief Metadata required to bind a combined texture and sampler. /// /// OpenGL binding requires the usage of the separate shader metadata struct. struct SampledImageSlot { /// @brief The name of the uniform slot. const char* name; /// @brief `ext_res_0` is the Metal binding value. size_t texture_index; /// @brief The Vulkan descriptor set index. size_t set; /// @brief The Vulkan binding value. 
size_t binding; }; struct ShaderStageIOSlot { const char* name; size_t location; size_t set; size_t binding; ShaderType type; size_t bit_width; size_t vec_size; size_t columns; size_t offset; constexpr size_t GetHash() const { return fml::HashCombine(name, location, set, binding, type, bit_width, vec_size, columns, offset); } constexpr bool operator==(const ShaderStageIOSlot& other) const { return name == other.name && // location == other.location && // set == other.set && // binding == other.binding && // type == other.type && // bit_width == other.bit_width && // vec_size == other.vec_size && // columns == other.columns && // offset == other.offset; } }; struct ShaderStageBufferLayout { size_t stride; size_t binding; constexpr size_t GetHash() const { return fml::HashCombine(stride, binding); } constexpr bool operator==(const ShaderStageBufferLayout& other) const { return stride == other.stride && // binding == other.binding; } }; enum class DescriptorType { kUniformBuffer, kStorageBuffer, kSampledImage, kImage, kSampler, kInputAttachment, }; struct DescriptorSetLayout { uint32_t binding; DescriptorType descriptor_type; ShaderStage shader_stage; }; template <size_t Size> struct Padding { private: uint8_t pad_[Size]; }; /// @brief Struct used for padding uniform buffer array elements. template <typename T, size_t Size, class = std::enable_if_t<std::is_standard_layout_v<T>>> struct Padded { T value; Padding<Size> _PADDING_; Padded(T p_value) : value(p_value){}; // NOLINT(google-explicit-constructor) }; inline constexpr Vector4 ToVector(Color color) { return {color.red, color.green, color.blue, color.alpha}; } } // namespace impeller #endif // FLUTTER_IMPELLER_CORE_SHADER_TYPES_H_
engine/impeller/core/shader_types.h/0
{ "file_path": "engine/impeller/core/shader_types.h", "repo_id": "engine", "token_count": 1688 }
179
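The `Padded`/`Padding` helpers exist to open up uniform-buffer-friendly gaps between fields. A minimal sketch, assuming the header path above, pads a single float out to a 16-byte slot; the `FragInfo` name is hypothetical:

```cpp
#include "impeller/core/shader_types.h"

// Hypothetical uniform struct: pad a lone float so the next member (or the
// next array element) starts on a 16-byte boundary.
struct FragInfo {
  impeller::Padded<float, 12> radius = 0.0f;  // 4 data bytes + 12 padding bytes.
};

static_assert(sizeof(FragInfo) == 16, "float padded out to a 16-byte slot");
```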
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_DISPLAY_LIST_DL_VERTICES_GEOMETRY_H_ #define FLUTTER_IMPELLER_DISPLAY_LIST_DL_VERTICES_GEOMETRY_H_ #include "flutter/display_list/dl_vertices.h" #include "impeller/entity/geometry/vertices_geometry.h" namespace impeller { std::shared_ptr<VerticesGeometry> MakeVertices( const flutter::DlVertices* vertices); } // namespace impeller #endif // FLUTTER_IMPELLER_DISPLAY_LIST_DL_VERTICES_GEOMETRY_H_
engine/impeller/display_list/dl_vertices_geometry.h/0
{ "file_path": "engine/impeller/display_list/dl_vertices_geometry.h", "repo_id": "engine", "token_count": 223 }
180
# Threading in Vulkan

The Vulkan backend uses a dedicated concurrent worker pool that is created along with the Vulkan context.

Unlike other pools, such as the IO worker pool, long running tasks may **not** be posted to this pool. This is because frame workloads can be, and often are, distributed to workers in this pool. A potentially long running task (such as texture decompression) could block frame critical tasks. The need for a separate pool for frame critical tasks works around the separate limitation of not being able to specify a QoS for individual tasks, and may be lifted in the future.

There is also a separate component called the fence waiter, which operates on its own thread. The purpose of the fence waiter is to ensure that a resource's reference count lives at least as long as the GPU command buffer(s) that access that resource.

Resource collection and pooling happen on another thread called the resource manager. This is because touching the allocators is a potentially expensive operation, and performing collection in a frame workload, or on the fence waiter thread, may cause jank.

With this overview, the total number of threads used by the Impeller Vulkan backend is the number of workers in the concurrent worker pool, plus the two threads for the fence waiter and the resource manager.

A summary of the interaction between the various threads is drawn below:

```mermaid
sequenceDiagram
    participant rt as Render Thread
    participant worker1 as Concurrent Worker 1
    participant worker2 as Concurrent Worker 2
    participant fence_waiter as Fence Waiter
    participant resource_manager as Resource Manager
    participant gpu as GPU

    rt->>+worker1: Setup PSO 1
    rt->>+worker2: Setup PSO n
    worker1-->>-rt: Done
    worker2-->>-rt: Done
    Note over rt,resource_manager: Application launch

    loop One Frame
        activate rt
        rt->>+worker2: Frame Workload
        activate fence_waiter
        rt->>fence_waiter: Resource 1 owned by GPU
        worker2-->>-rt: Done
        rt->>fence_waiter: Resource 2 owned by GPU
        rt->>gpu: Submit GPU Commands
        deactivate rt
    end

    activate gpu
    gpu-->>fence_waiter: GPU Work Done
    fence_waiter->>resource_manager: Collect/Pool Resources
    deactivate fence_waiter
    activate resource_manager
    deactivate gpu
    deactivate resource_manager
```
engine/impeller/docs/vulkan_threading.md/0
{ "file_path": "engine/impeller/docs/vulkan_threading.md", "repo_id": "engine", "token_count": 682 }
181
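The fence waiter described above is easiest to picture as a small worker that keeps resource references alive until a fence signals. The sketch below is a generic illustration of that pattern, not the Vulkan backend's actual fence waiter API; every name and type in it is invented:

```cpp
#include <chrono>
#include <condition_variable>
#include <deque>
#include <functional>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

// Invented types: `is_signaled` models polling a fence, and `kept_alive`
// models the resource references the render thread hands off per frame.
struct PendingWork {
  std::function<bool()> is_signaled;
  std::vector<std::shared_ptr<void>> kept_alive;
};

class FenceWaiter {
 public:
  FenceWaiter() : thread_([this] { Loop(); }) {}

  ~FenceWaiter() {
    {
      std::scoped_lock lock(mutex_);
      running_ = false;
    }
    cv_.notify_one();
    thread_.join();  // Drains remaining work before shutdown.
  }

  void AddFence(PendingWork work) {
    {
      std::scoped_lock lock(mutex_);
      pending_.push_back(std::move(work));
    }
    cv_.notify_one();
  }

 private:
  void Loop() {
    std::unique_lock lock(mutex_);
    while (running_ || !pending_.empty()) {
      cv_.wait_for(lock, std::chrono::milliseconds(1));
      // Releasing the shared_ptrs here is what actually frees (or pools) the
      // resources, and it happens off the render thread.
      for (auto it = pending_.begin(); it != pending_.end();) {
        it = it->is_signaled() ? pending_.erase(it) : std::next(it);
      }
    }
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  std::deque<PendingWork> pending_;
  bool running_ = true;
  std::thread thread_;  // Started last so the members above are ready.
};
```

In the real backend, the collection step is then forwarded to the resource manager thread rather than performed inline, as the document notes.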
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/entity/contents/content_context.h" #include <memory> #include <utility> #include "fml/trace_event.h" #include "impeller/base/strings.h" #include "impeller/base/validation.h" #include "impeller/core/formats.h" #include "impeller/entity/contents/framebuffer_blend_contents.h" #include "impeller/entity/entity.h" #include "impeller/entity/render_target_cache.h" #include "impeller/renderer/command_buffer.h" #include "impeller/renderer/pipeline_descriptor.h" #include "impeller/renderer/pipeline_library.h" #include "impeller/renderer/render_target.h" #include "impeller/renderer/texture_mipmap.h" #include "impeller/tessellator/tessellator.h" #include "impeller/typographer/typographer_context.h" namespace impeller { void ContentContextOptions::ApplyToPipelineDescriptor( PipelineDescriptor& desc) const { auto pipeline_blend = blend_mode; if (blend_mode > Entity::kLastPipelineBlendMode) { VALIDATION_LOG << "Cannot use blend mode " << static_cast<int>(blend_mode) << " as a pipeline blend."; pipeline_blend = BlendMode::kSourceOver; } desc.SetSampleCount(sample_count); ColorAttachmentDescriptor color0 = *desc.GetColorAttachmentDescriptor(0u); color0.format = color_attachment_pixel_format; color0.alpha_blend_op = BlendOperation::kAdd; color0.color_blend_op = BlendOperation::kAdd; switch (pipeline_blend) { case BlendMode::kClear: if (is_for_rrect_blur_clear) { color0.alpha_blend_op = BlendOperation::kReverseSubtract; color0.color_blend_op = BlendOperation::kReverseSubtract; color0.dst_alpha_blend_factor = BlendFactor::kOne; color0.dst_color_blend_factor = BlendFactor::kOne; color0.src_alpha_blend_factor = BlendFactor::kDestinationColor; color0.src_color_blend_factor = BlendFactor::kDestinationColor; } else { color0.dst_alpha_blend_factor = BlendFactor::kZero; color0.dst_color_blend_factor = BlendFactor::kZero; color0.src_alpha_blend_factor = BlendFactor::kZero; color0.src_color_blend_factor = BlendFactor::kZero; } break; case BlendMode::kSource: color0.blending_enabled = false; color0.dst_alpha_blend_factor = BlendFactor::kZero; color0.dst_color_blend_factor = BlendFactor::kZero; color0.src_alpha_blend_factor = BlendFactor::kOne; color0.src_color_blend_factor = BlendFactor::kOne; break; case BlendMode::kDestination: color0.dst_alpha_blend_factor = BlendFactor::kOne; color0.dst_color_blend_factor = BlendFactor::kOne; color0.src_alpha_blend_factor = BlendFactor::kZero; color0.src_color_blend_factor = BlendFactor::kZero; break; case BlendMode::kSourceOver: color0.dst_alpha_blend_factor = BlendFactor::kOneMinusSourceAlpha; color0.dst_color_blend_factor = BlendFactor::kOneMinusSourceAlpha; color0.src_alpha_blend_factor = BlendFactor::kOne; color0.src_color_blend_factor = BlendFactor::kOne; break; case BlendMode::kDestinationOver: color0.dst_alpha_blend_factor = BlendFactor::kOne; color0.dst_color_blend_factor = BlendFactor::kOne; color0.src_alpha_blend_factor = BlendFactor::kOneMinusDestinationAlpha; color0.src_color_blend_factor = BlendFactor::kOneMinusDestinationAlpha; break; case BlendMode::kSourceIn: color0.dst_alpha_blend_factor = BlendFactor::kZero; color0.dst_color_blend_factor = BlendFactor::kZero; color0.src_alpha_blend_factor = BlendFactor::kDestinationAlpha; color0.src_color_blend_factor = BlendFactor::kDestinationAlpha; break; case BlendMode::kDestinationIn: color0.dst_alpha_blend_factor = BlendFactor::kSourceAlpha; 
color0.dst_color_blend_factor = BlendFactor::kSourceAlpha; color0.src_alpha_blend_factor = BlendFactor::kZero; color0.src_color_blend_factor = BlendFactor::kZero; break; case BlendMode::kSourceOut: color0.dst_alpha_blend_factor = BlendFactor::kZero; color0.dst_color_blend_factor = BlendFactor::kZero; color0.src_alpha_blend_factor = BlendFactor::kOneMinusDestinationAlpha; color0.src_color_blend_factor = BlendFactor::kOneMinusDestinationAlpha; break; case BlendMode::kDestinationOut: color0.dst_alpha_blend_factor = BlendFactor::kOneMinusSourceAlpha; color0.dst_color_blend_factor = BlendFactor::kOneMinusSourceAlpha; color0.src_alpha_blend_factor = BlendFactor::kZero; color0.src_color_blend_factor = BlendFactor::kZero; break; case BlendMode::kSourceATop: color0.dst_alpha_blend_factor = BlendFactor::kOneMinusSourceAlpha; color0.dst_color_blend_factor = BlendFactor::kOneMinusSourceAlpha; color0.src_alpha_blend_factor = BlendFactor::kDestinationAlpha; color0.src_color_blend_factor = BlendFactor::kDestinationAlpha; break; case BlendMode::kDestinationATop: color0.dst_alpha_blend_factor = BlendFactor::kSourceAlpha; color0.dst_color_blend_factor = BlendFactor::kSourceAlpha; color0.src_alpha_blend_factor = BlendFactor::kOneMinusDestinationAlpha; color0.src_color_blend_factor = BlendFactor::kOneMinusDestinationAlpha; break; case BlendMode::kXor: color0.dst_alpha_blend_factor = BlendFactor::kOneMinusSourceAlpha; color0.dst_color_blend_factor = BlendFactor::kOneMinusSourceAlpha; color0.src_alpha_blend_factor = BlendFactor::kOneMinusDestinationAlpha; color0.src_color_blend_factor = BlendFactor::kOneMinusDestinationAlpha; break; case BlendMode::kPlus: color0.dst_alpha_blend_factor = BlendFactor::kOne; color0.dst_color_blend_factor = BlendFactor::kOne; color0.src_alpha_blend_factor = BlendFactor::kOne; color0.src_color_blend_factor = BlendFactor::kOne; break; case BlendMode::kModulate: color0.dst_alpha_blend_factor = BlendFactor::kSourceAlpha; color0.dst_color_blend_factor = BlendFactor::kSourceColor; color0.src_alpha_blend_factor = BlendFactor::kZero; color0.src_color_blend_factor = BlendFactor::kZero; break; default: FML_UNREACHABLE(); } desc.SetColorAttachmentDescriptor(0u, color0); if (!has_depth_stencil_attachments) { desc.ClearDepthAttachment(); desc.ClearStencilAttachments(); } auto maybe_stencil = desc.GetFrontStencilAttachmentDescriptor(); auto maybe_depth = desc.GetDepthStencilAttachmentDescriptor(); FML_DCHECK(has_depth_stencil_attachments == maybe_depth.has_value()) << "Depth attachment doesn't match expected pipeline state. " "has_depth_stencil_attachments=" << has_depth_stencil_attachments; FML_DCHECK(has_depth_stencil_attachments == maybe_stencil.has_value()) << "Stencil attachment doesn't match expected pipeline state. " "has_depth_stencil_attachments=" << has_depth_stencil_attachments; if (maybe_stencil.has_value()) { StencilAttachmentDescriptor front_stencil = maybe_stencil.value(); StencilAttachmentDescriptor back_stencil = front_stencil; switch (stencil_mode) { case StencilMode::kIgnore: front_stencil.stencil_compare = CompareFunction::kAlways; front_stencil.depth_stencil_pass = StencilOperation::kKeep; desc.SetStencilAttachmentDescriptors(front_stencil); break; case StencilMode::kStencilNonZeroFill: // The stencil ref should be 0 on commands that use this mode. 
front_stencil.stencil_compare = CompareFunction::kAlways; front_stencil.depth_stencil_pass = StencilOperation::kIncrementWrap; back_stencil.stencil_compare = CompareFunction::kAlways; back_stencil.depth_stencil_pass = StencilOperation::kDecrementWrap; desc.SetStencilAttachmentDescriptors(front_stencil, back_stencil); break; case StencilMode::kStencilEvenOddFill: // The stencil ref should be 0 on commands that use this mode. front_stencil.stencil_compare = CompareFunction::kEqual; front_stencil.depth_stencil_pass = StencilOperation::kIncrementWrap; front_stencil.stencil_failure = StencilOperation::kDecrementWrap; desc.SetStencilAttachmentDescriptors(front_stencil); break; case StencilMode::kCoverCompare: // The stencil ref should be 0 on commands that use this mode. front_stencil.stencil_compare = CompareFunction::kNotEqual; front_stencil.depth_stencil_pass = StencilOperation::kSetToReferenceValue; desc.SetStencilAttachmentDescriptors(front_stencil); break; case StencilMode::kCoverCompareInverted: // The stencil ref should be 0 on commands that use this mode. front_stencil.stencil_compare = CompareFunction::kEqual; front_stencil.stencil_failure = StencilOperation::kSetToReferenceValue; desc.SetStencilAttachmentDescriptors(front_stencil); break; case StencilMode::kLegacyClipRestore: front_stencil.stencil_compare = CompareFunction::kLess; front_stencil.depth_stencil_pass = StencilOperation::kSetToReferenceValue; desc.SetStencilAttachmentDescriptors(front_stencil); break; case StencilMode::kLegacyClipIncrement: front_stencil.stencil_compare = CompareFunction::kEqual; front_stencil.depth_stencil_pass = StencilOperation::kIncrementClamp; desc.SetStencilAttachmentDescriptors(front_stencil); break; case StencilMode::kLegacyClipDecrement: front_stencil.stencil_compare = CompareFunction::kEqual; front_stencil.depth_stencil_pass = StencilOperation::kDecrementClamp; desc.SetStencilAttachmentDescriptors(front_stencil); break; case StencilMode::kLegacyClipCompare: front_stencil.stencil_compare = CompareFunction::kEqual; front_stencil.depth_stencil_pass = StencilOperation::kKeep; desc.SetStencilAttachmentDescriptors(front_stencil); break; } } if (maybe_depth.has_value()) { DepthAttachmentDescriptor depth = maybe_depth.value(); depth.depth_write_enabled = depth_write_enabled; depth.depth_compare = depth_compare; desc.SetDepthStencilAttachmentDescriptor(depth); } desc.SetPrimitiveType(primitive_type); desc.SetPolygonMode(wireframe ? PolygonMode::kLine : PolygonMode::kFill); } template <typename PipelineT> static std::unique_ptr<PipelineT> CreateDefaultPipeline( const Context& context) { auto desc = PipelineT::Builder::MakeDefaultPipelineDescriptor(context); if (!desc.has_value()) { return nullptr; } // Apply default ContentContextOptions to the descriptor. 
const auto default_color_format = context.GetCapabilities()->GetDefaultColorFormat(); ContentContextOptions{.sample_count = SampleCount::kCount4, .primitive_type = PrimitiveType::kTriangleStrip, .color_attachment_pixel_format = default_color_format} .ApplyToPipelineDescriptor(*desc); return std::make_unique<PipelineT>(context, desc); } ContentContext::ContentContext( std::shared_ptr<Context> context, std::shared_ptr<TypographerContext> typographer_context, std::shared_ptr<RenderTargetAllocator> render_target_allocator) : context_(std::move(context)), lazy_glyph_atlas_( std::make_shared<LazyGlyphAtlas>(std::move(typographer_context))), tessellator_(std::make_shared<Tessellator>()), #if IMPELLER_ENABLE_3D scene_context_(std::make_shared<scene::SceneContext>(context_)), #endif // IMPELLER_ENABLE_3D render_target_cache_(render_target_allocator == nullptr ? std::make_shared<RenderTargetCache>( context_->GetResourceAllocator()) : std::move(render_target_allocator)), host_buffer_(HostBuffer::Create(context_->GetResourceAllocator())), pending_command_buffers_(std::make_unique<PendingCommandBuffers>()) { if (!context_ || !context_->IsValid()) { return; } auto options = ContentContextOptions{ .sample_count = SampleCount::kCount4, .color_attachment_pixel_format = context_->GetCapabilities()->GetDefaultColorFormat()}; auto options_trianglestrip = ContentContextOptions{ .sample_count = SampleCount::kCount4, .primitive_type = PrimitiveType::kTriangleStrip, .color_attachment_pixel_format = context_->GetCapabilities()->GetDefaultColorFormat()}; const auto supports_decal = static_cast<Scalar>( context_->GetCapabilities()->SupportsDecalSamplerAddressMode()); #ifdef IMPELLER_DEBUG checkerboard_pipelines_.CreateDefault(*context_, options); #endif // IMPELLER_DEBUG solid_fill_pipelines_.CreateDefault(*context_, options); if (context_->GetCapabilities()->SupportsSSBO()) { linear_gradient_ssbo_fill_pipelines_.CreateDefault(*context_, options); radial_gradient_ssbo_fill_pipelines_.CreateDefault(*context_, options); conical_gradient_ssbo_fill_pipelines_.CreateDefault(*context_, options); sweep_gradient_ssbo_fill_pipelines_.CreateDefault(*context_, options); } else { linear_gradient_fill_pipelines_.CreateDefault(*context_, options); radial_gradient_fill_pipelines_.CreateDefault(*context_, options); conical_gradient_fill_pipelines_.CreateDefault(*context_, options); sweep_gradient_fill_pipelines_.CreateDefault(*context_, options); } if (context_->GetCapabilities()->SupportsFramebufferFetch()) { framebuffer_blend_color_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kColor), supports_decal}); framebuffer_blend_colorburn_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kColorBurn), supports_decal}); framebuffer_blend_colordodge_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kColorDodge), supports_decal}); framebuffer_blend_darken_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kDarken), supports_decal}); framebuffer_blend_difference_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kDifference), supports_decal}); framebuffer_blend_exclusion_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kExclusion), supports_decal}); framebuffer_blend_hardlight_pipelines_.CreateDefault( *context_, options_trianglestrip, 
{static_cast<Scalar>(BlendSelectValues::kHardLight), supports_decal}); framebuffer_blend_hue_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kHue), supports_decal}); framebuffer_blend_lighten_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kLighten), supports_decal}); framebuffer_blend_luminosity_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kLuminosity), supports_decal}); framebuffer_blend_multiply_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kMultiply), supports_decal}); framebuffer_blend_overlay_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kOverlay), supports_decal}); framebuffer_blend_saturation_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kSaturation), supports_decal}); framebuffer_blend_screen_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kScreen), supports_decal}); framebuffer_blend_softlight_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kSoftLight), supports_decal}); } blend_color_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kColor), supports_decal}); blend_colorburn_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kColorBurn), supports_decal}); blend_colordodge_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kColorDodge), supports_decal}); blend_darken_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kDarken), supports_decal}); blend_difference_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kDifference), supports_decal}); blend_exclusion_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kExclusion), supports_decal}); blend_hardlight_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kHardLight), supports_decal}); blend_hue_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kHue), supports_decal}); blend_lighten_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kLighten), supports_decal}); blend_luminosity_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kLuminosity), supports_decal}); blend_multiply_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kMultiply), supports_decal}); blend_overlay_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kOverlay), supports_decal}); blend_saturation_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kSaturation), supports_decal}); blend_screen_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kScreen), supports_decal}); blend_softlight_pipelines_.CreateDefault( *context_, options_trianglestrip, {static_cast<Scalar>(BlendSelectValues::kSoftLight), supports_decal}); rrect_blur_pipelines_.CreateDefault(*context_, 
options_trianglestrip); texture_blend_pipelines_.CreateDefault(*context_, options); texture_pipelines_.CreateDefault(*context_, options); texture_strict_src_pipelines_.CreateDefault(*context_, options); position_uv_pipelines_.CreateDefault(*context_, options); tiled_texture_pipelines_.CreateDefault(*context_, options); gaussian_blur_noalpha_decal_pipelines_.CreateDefault(*context_, options_trianglestrip); gaussian_blur_noalpha_nodecal_pipelines_.CreateDefault(*context_, options_trianglestrip); kernel_decal_pipelines_.CreateDefault(*context_, options_trianglestrip); kernel_nodecal_pipelines_.CreateDefault(*context_, options_trianglestrip); border_mask_blur_pipelines_.CreateDefault(*context_, options_trianglestrip); morphology_filter_pipelines_.CreateDefault(*context_, options_trianglestrip, {supports_decal}); color_matrix_color_filter_pipelines_.CreateDefault(*context_, options_trianglestrip); linear_to_srgb_filter_pipelines_.CreateDefault(*context_, options_trianglestrip); srgb_to_linear_filter_pipelines_.CreateDefault(*context_, options_trianglestrip); glyph_atlas_pipelines_.CreateDefault( *context_, options, {static_cast<Scalar>( GetContext()->GetCapabilities()->GetDefaultGlyphAtlasFormat() == PixelFormat::kA8UNormInt)}); glyph_atlas_color_pipelines_.CreateDefault(*context_, options); geometry_color_pipelines_.CreateDefault(*context_, options); yuv_to_rgb_filter_pipelines_.CreateDefault(*context_, options_trianglestrip); porter_duff_blend_pipelines_.CreateDefault(*context_, options_trianglestrip, {supports_decal}); // GLES only shader that is unsupported on macOS. #if defined(IMPELLER_ENABLE_OPENGLES) && !defined(FML_OS_MACOSX) if (GetContext()->GetBackendType() == Context::BackendType::kOpenGLES) { texture_external_pipelines_.CreateDefault(*context_, options); } if (GetContext()->GetBackendType() == Context::BackendType::kOpenGLES) { tiled_texture_external_pipelines_.CreateDefault(*context_, options); } #endif // IMPELLER_ENABLE_OPENGLES if (context_->GetCapabilities()->SupportsCompute()) { auto pipeline_desc = PointsComputeShaderPipeline::MakeDefaultPipelineDescriptor(*context_); point_field_compute_pipelines_ = context_->GetPipelineLibrary()->GetPipeline(pipeline_desc).Get(); auto uv_pipeline_desc = UvComputeShaderPipeline::MakeDefaultPipelineDescriptor(*context_); uv_compute_pipelines_ = context_->GetPipelineLibrary()->GetPipeline(uv_pipeline_desc).Get(); } /// Setup default clip pipeline. auto clip_pipeline_descriptor = ClipPipeline::Builder::MakeDefaultPipelineDescriptor(*context_); if (!clip_pipeline_descriptor.has_value()) { return; } ContentContextOptions{ .sample_count = SampleCount::kCount4, .color_attachment_pixel_format = context_->GetCapabilities()->GetDefaultColorFormat()} .ApplyToPipelineDescriptor(*clip_pipeline_descriptor); // Disable write to all color attachments. 
auto clip_color_attachments = clip_pipeline_descriptor->GetColorAttachmentDescriptors(); for (auto& color_attachment : clip_color_attachments) { color_attachment.second.write_mask = ColorWriteMaskBits::kNone; } clip_pipeline_descriptor->SetColorAttachmentDescriptors( std::move(clip_color_attachments)); clip_pipelines_.SetDefault(options, std::make_unique<ClipPipeline>( *context_, clip_pipeline_descriptor)); is_valid_ = true; InitializeCommonlyUsedShadersIfNeeded(); } ContentContext::~ContentContext() = default; bool ContentContext::IsValid() const { return is_valid_; } fml::StatusOr<RenderTarget> ContentContext::MakeSubpass( const std::string& label, ISize texture_size, const SubpassCallback& subpass_callback, bool msaa_enabled, bool depth_stencil_enabled, int32_t mip_count) const { const std::shared_ptr<Context>& context = GetContext(); RenderTarget subpass_target; std::optional<RenderTarget::AttachmentConfig> depth_stencil_config = depth_stencil_enabled ? RenderTarget::kDefaultStencilAttachmentConfig : std::optional<RenderTarget::AttachmentConfig>(); if (context->GetCapabilities()->SupportsOffscreenMSAA() && msaa_enabled) { subpass_target = GetRenderTargetCache()->CreateOffscreenMSAA( *context, texture_size, /*mip_count=*/mip_count, SPrintF("%s Offscreen", label.c_str()), RenderTarget::kDefaultColorAttachmentConfigMSAA, depth_stencil_config); } else { subpass_target = GetRenderTargetCache()->CreateOffscreen( *context, texture_size, /*mip_count=*/mip_count, SPrintF("%s Offscreen", label.c_str()), RenderTarget::kDefaultColorAttachmentConfig, depth_stencil_config); } return MakeSubpass(label, subpass_target, subpass_callback); } fml::StatusOr<RenderTarget> ContentContext::MakeSubpass( const std::string& label, const RenderTarget& subpass_target, const SubpassCallback& subpass_callback) const { const std::shared_ptr<Context>& context = GetContext(); auto subpass_texture = subpass_target.GetRenderTargetTexture(); if (!subpass_texture) { return fml::Status(fml::StatusCode::kUnknown, ""); } auto sub_command_buffer = context->CreateCommandBuffer(); sub_command_buffer->SetLabel(SPrintF("%s CommandBuffer", label.c_str())); if (!sub_command_buffer) { return fml::Status(fml::StatusCode::kUnknown, ""); } auto sub_renderpass = sub_command_buffer->CreateRenderPass(subpass_target); if (!sub_renderpass) { return fml::Status(fml::StatusCode::kUnknown, ""); } sub_renderpass->SetLabel(SPrintF("%s RenderPass", label.c_str())); if (!subpass_callback(*this, *sub_renderpass)) { return fml::Status(fml::StatusCode::kUnknown, ""); } if (!sub_renderpass->EncodeCommands()) { return fml::Status(fml::StatusCode::kUnknown, ""); } const std::shared_ptr<Texture>& target_texture = subpass_target.GetRenderTargetTexture(); if (target_texture->GetMipCount() > 1) { fml::Status mipmap_status = AddMipmapGeneration(sub_command_buffer, context, target_texture); if (!mipmap_status.ok()) { return mipmap_status; } } if (!context->GetCommandQueue()->Submit({sub_command_buffer}).ok()) { return fml::Status(fml::StatusCode::kUnknown, ""); } return subpass_target; } #if IMPELLER_ENABLE_3D std::shared_ptr<scene::SceneContext> ContentContext::GetSceneContext() const { return scene_context_; } #endif // IMPELLER_ENABLE_3D std::shared_ptr<Tessellator> ContentContext::GetTessellator() const { return tessellator_; } std::shared_ptr<Context> ContentContext::GetContext() const { return context_; } const Capabilities& ContentContext::GetDeviceCapabilities() const { return *context_->GetCapabilities(); } void ContentContext::SetWireframe(bool 
wireframe) { wireframe_ = wireframe; } std::shared_ptr<Pipeline<PipelineDescriptor>> ContentContext::GetCachedRuntimeEffectPipeline( const std::string& unique_entrypoint_name, const ContentContextOptions& options, const std::function<std::shared_ptr<Pipeline<PipelineDescriptor>>()>& create_callback) const { RuntimeEffectPipelineKey key{unique_entrypoint_name, options}; auto it = runtime_effect_pipelines_.find(key); if (it == runtime_effect_pipelines_.end()) { it = runtime_effect_pipelines_.insert(it, {key, create_callback()}); } return it->second; } void ContentContext::ClearCachedRuntimeEffectPipeline( const std::string& unique_entrypoint_name) const { for (auto it = runtime_effect_pipelines_.begin(); it != runtime_effect_pipelines_.end();) { if (it->first.unique_entrypoint_name == unique_entrypoint_name) { it = runtime_effect_pipelines_.erase(it); } else { it++; } } } void ContentContext::InitializeCommonlyUsedShadersIfNeeded() const { TRACE_EVENT0("flutter", "InitializeCommonlyUsedShadersIfNeeded"); GetContext()->InitializeCommonlyUsedShadersIfNeeded(); if (GetContext()->GetBackendType() == Context::BackendType::kOpenGLES) { // TODO(jonahwilliams): The OpenGL Embedder Unittests hang if this code // runs. return; } // Initialize commonly used shaders that aren't defaults. These settings were // chosen based on the knowledge that we mix and match triangle and // triangle-strip geometry, and also have fairly aggressive srcOver to src // blend mode conversions. auto options = ContentContextOptions{ .sample_count = SampleCount::kCount4, .color_attachment_pixel_format = context_->GetCapabilities()->GetDefaultColorFormat()}; for (const auto mode : {BlendMode::kSource, BlendMode::kSourceOver}) { for (const auto geometry : {PrimitiveType::kTriangle, PrimitiveType::kTriangleStrip}) { options.blend_mode = mode; options.primitive_type = geometry; CreateIfNeeded(solid_fill_pipelines_, options); CreateIfNeeded(texture_pipelines_, options); if (GetContext()->GetCapabilities()->SupportsSSBO()) { CreateIfNeeded(linear_gradient_ssbo_fill_pipelines_, options); CreateIfNeeded(radial_gradient_ssbo_fill_pipelines_, options); CreateIfNeeded(sweep_gradient_ssbo_fill_pipelines_, options); CreateIfNeeded(conical_gradient_ssbo_fill_pipelines_, options); } } } options.blend_mode = BlendMode::kDestination; options.primitive_type = PrimitiveType::kTriangleStrip; for (const auto stencil_mode : {ContentContextOptions::StencilMode::kLegacyClipIncrement, ContentContextOptions::StencilMode::kLegacyClipDecrement, ContentContextOptions::StencilMode::kLegacyClipRestore}) { options.stencil_mode = stencil_mode; CreateIfNeeded(clip_pipelines_, options); } // On ARM devices, the initial usage of vkCmdCopyBufferToImage has been // observed to take 10s of ms as an internal shader is compiled to perform // the operation. Similarly, the initial render pass can also take 10s of ms // for a similar reason. Because the context object is initialized far // before the first frame, create a trivial texture and render pass to force // the driver to compile these shaders before the frame begins. TextureDescriptor desc; desc.size = {1, 1}; desc.storage_mode = StorageMode::kHostVisible; desc.format = PixelFormat::kR8G8B8A8UNormInt; auto texture = GetContext()->GetResourceAllocator()->CreateTexture(desc); uint32_t color = 0; if (!texture->SetContents(reinterpret_cast<uint8_t*>(&color), 4u)) { VALIDATION_LOG << "Failed to set bootstrap texture."; } } } // namespace impeller
engine/impeller/entity/contents/content_context.cc/0
{ "file_path": "engine/impeller/entity/contents/content_context.cc", "repo_id": "engine", "token_count": 11517 }
182
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_ENTITY_CONTENTS_FILTERS_GAUSSIAN_BLUR_FILTER_CONTENTS_H_ #define FLUTTER_IMPELLER_ENTITY_CONTENTS_FILTERS_GAUSSIAN_BLUR_FILTER_CONTENTS_H_ #include <optional> #include "impeller/entity/contents/content_context.h" #include "impeller/entity/contents/filters/filter_contents.h" #include "impeller/entity/geometry/geometry.h" namespace impeller { struct BlurParameters { Point blur_uv_offset; Scalar blur_sigma; int blur_radius; int step_size; }; KernelPipeline::FragmentShader::KernelSamples GenerateBlurInfo( BlurParameters parameters); /// This will shrink the size of a kernel by roughly half by sampling between /// samples and relying on linear interpolation between the samples. KernelPipeline::FragmentShader::KernelSamples LerpHackKernelSamples( KernelPipeline::FragmentShader::KernelSamples samples); /// Performs a bidirectional Gaussian blur. /// /// This is accomplished by rendering multiple passes in multiple directions. /// Note: This will replace `DirectionalGaussianBlurFilterContents`. class GaussianBlurFilterContents final : public FilterContents { public: static std::string_view kNoMipsError; static const int32_t kBlurFilterRequiredMipCount; explicit GaussianBlurFilterContents( Scalar sigma_x, Scalar sigma_y, Entity::TileMode tile_mode, BlurStyle mask_blur_style, const std::shared_ptr<Geometry>& mask_geometry); Scalar GetSigmaX() const { return sigma_x_; } Scalar GetSigmaY() const { return sigma_y_; } // |FilterContents| std::optional<Rect> GetFilterSourceCoverage( const Matrix& effect_transform, const Rect& output_limit) const override; // |FilterContents| std::optional<Rect> GetFilterCoverage( const FilterInput::Vector& inputs, const Entity& entity, const Matrix& effect_transform) const override; /// Given a sigma (standard deviation) calculate the blur radius (1/2 the /// kernel size). static Scalar CalculateBlurRadius(Scalar sigma); /// Calculate the UV coordinates for rendering the filter_input. /// @param filter_input The FilterInput that should be rendered. /// @param entity The associated entity for the filter_input. /// @param source_rect The rect in source coordinates to convert to uvs. /// @param texture_size The rect to convert in source coordinates. static Quad CalculateUVs(const std::shared_ptr<FilterInput>& filter_input, const Entity& entity, const Rect& source_rect, const ISize& texture_size); /// Calculate the scale factor for the downsample pass given a sigma value. /// /// Visible for testing. static Scalar CalculateScale(Scalar sigma); /// Scales down the sigma value to match Skia's behavior. /// /// effective_blur_radius = CalculateBlurRadius(ScaleSigma(sigma_)); /// /// This function was calculated by observing Skia's behavior. Its blur at /// 500 seemed to be 0.15. Since we clamp at 500 I solved the quadratic /// equation that puts the minima there and a f(0)=1. 
static Scalar ScaleSigma(Scalar sigma); private: // |FilterContents| std::optional<Entity> RenderFilter( const FilterInput::Vector& input_textures, const ContentContext& renderer, const Entity& entity, const Matrix& effect_transform, const Rect& coverage, const std::optional<Rect>& coverage_hint) const override; const Scalar sigma_x_ = 0.0; const Scalar sigma_y_ = 0.0; const Entity::TileMode tile_mode_; const BlurStyle mask_blur_style_; std::shared_ptr<Geometry> mask_geometry_; }; } // namespace impeller #endif // FLUTTER_IMPELLER_ENTITY_CONTENTS_FILTERS_GAUSSIAN_BLUR_FILTER_CONTENTS_H_
engine/impeller/entity/contents/filters/gaussian_blur_filter_contents.h/0
{ "file_path": "engine/impeller/entity/contents/filters/gaussian_blur_filter_contents.h", "repo_id": "engine", "token_count": 1320 }
183
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_ENTITY_CONTENTS_FILTERS_LOCAL_MATRIX_FILTER_CONTENTS_H_ #define FLUTTER_IMPELLER_ENTITY_CONTENTS_FILTERS_LOCAL_MATRIX_FILTER_CONTENTS_H_ #include "impeller/entity/contents/filters/filter_contents.h" #include "impeller/entity/contents/filters/inputs/filter_input.h" namespace impeller { class LocalMatrixFilterContents final : public FilterContents { public: LocalMatrixFilterContents(); ~LocalMatrixFilterContents() override; void SetMatrix(Matrix matrix); // |FilterContents| Matrix GetLocalTransform(const Matrix& parent_transform) const override; // |FilterContents| std::optional<Rect> GetFilterSourceCoverage( const Matrix& effect_transform, const Rect& output_limit) const override; private: // |FilterContents| std::optional<Entity> RenderFilter( const FilterInput::Vector& input_textures, const ContentContext& renderer, const Entity& entity, const Matrix& effect_transform, const Rect& coverage, const std::optional<Rect>& coverage_hint) const override; Matrix matrix_; LocalMatrixFilterContents(const LocalMatrixFilterContents&) = delete; LocalMatrixFilterContents& operator=(const LocalMatrixFilterContents&) = delete; }; } // namespace impeller #endif // FLUTTER_IMPELLER_ENTITY_CONTENTS_FILTERS_LOCAL_MATRIX_FILTER_CONTENTS_H_
engine/impeller/entity/contents/filters/local_matrix_filter_contents.h/0
{ "file_path": "engine/impeller/entity/contents/filters/local_matrix_filter_contents.h", "repo_id": "engine", "token_count": 496 }
184
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "radial_gradient_contents.h" #include "impeller/entity/contents/clip_contents.h" #include "impeller/entity/contents/content_context.h" #include "impeller/entity/contents/gradient_generator.h" #include "impeller/entity/entity.h" #include "impeller/entity/geometry/geometry.h" #include "impeller/geometry/gradient.h" #include "impeller/renderer/render_pass.h" namespace impeller { RadialGradientContents::RadialGradientContents() = default; RadialGradientContents::~RadialGradientContents() = default; void RadialGradientContents::SetCenterAndRadius(Point center, Scalar radius) { center_ = center; radius_ = radius; } void RadialGradientContents::SetTileMode(Entity::TileMode tile_mode) { tile_mode_ = tile_mode; } void RadialGradientContents::SetColors(std::vector<Color> colors) { colors_ = std::move(colors); } void RadialGradientContents::SetStops(std::vector<Scalar> stops) { stops_ = std::move(stops); } const std::vector<Color>& RadialGradientContents::GetColors() const { return colors_; } const std::vector<Scalar>& RadialGradientContents::GetStops() const { return stops_; } bool RadialGradientContents::IsOpaque() const { if (GetOpacityFactor() < 1 || tile_mode_ == Entity::TileMode::kDecal) { return false; } for (auto color : colors_) { if (!color.IsOpaque()) { return false; } } return true; } bool RadialGradientContents::Render(const ContentContext& renderer, const Entity& entity, RenderPass& pass) const { if (renderer.GetDeviceCapabilities().SupportsSSBO()) { return RenderSSBO(renderer, entity, pass); } return RenderTexture(renderer, entity, pass); } bool RadialGradientContents::RenderSSBO(const ContentContext& renderer, const Entity& entity, RenderPass& pass) const { using VS = RadialGradientSSBOFillPipeline::VertexShader; using FS = RadialGradientSSBOFillPipeline::FragmentShader; VS::FrameInfo frame_info; frame_info.matrix = GetInverseEffectTransform(); PipelineBuilderCallback pipeline_callback = [&renderer](ContentContextOptions options) { return renderer.GetRadialGradientSSBOFillPipeline(options); }; return ColorSourceContents::DrawGeometry<VS>( renderer, entity, pass, pipeline_callback, frame_info, [this, &renderer](RenderPass& pass) { FS::FragInfo frag_info; frag_info.center = center_; frag_info.radius = radius_; frag_info.tile_mode = static_cast<Scalar>(tile_mode_); frag_info.decal_border_color = decal_border_color_; frag_info.alpha = GetOpacityFactor(); auto& host_buffer = renderer.GetTransientsBuffer(); auto colors = CreateGradientColors(colors_, stops_); frag_info.colors_length = colors.size(); auto color_buffer = host_buffer.Emplace(colors.data(), colors.size() * sizeof(StopData), DefaultUniformAlignment()); pass.SetCommandLabel("RadialGradientSSBOFill"); FS::BindFragInfo( pass, renderer.GetTransientsBuffer().EmplaceUniform(frag_info)); FS::BindColorData(pass, color_buffer); return true; }); } bool RadialGradientContents::RenderTexture(const ContentContext& renderer, const Entity& entity, RenderPass& pass) const { using VS = RadialGradientFillPipeline::VertexShader; using FS = RadialGradientFillPipeline::FragmentShader; auto gradient_data = CreateGradientBuffer(colors_, stops_); auto gradient_texture = CreateGradientTexture(gradient_data, renderer.GetContext()); if (gradient_texture == nullptr) { return false; } VS::FrameInfo frame_info; frame_info.matrix = GetInverseEffectTransform(); VS::BindFrameInfo(pass, 
renderer.GetTransientsBuffer().EmplaceUniform(frame_info)); PipelineBuilderCallback pipeline_callback = [&renderer](ContentContextOptions options) { return renderer.GetRadialGradientFillPipeline(options); }; return ColorSourceContents::DrawGeometry<VS>( renderer, entity, pass, pipeline_callback, frame_info, [this, &renderer, &gradient_texture](RenderPass& pass) { FS::FragInfo frag_info; frag_info.center = center_; frag_info.radius = radius_; frag_info.tile_mode = static_cast<Scalar>(tile_mode_); frag_info.decal_border_color = decal_border_color_; frag_info.texture_sampler_y_coord_scale = gradient_texture->GetYCoordScale(); frag_info.alpha = GetOpacityFactor(); frag_info.half_texel = Vector2(0.5 / gradient_texture->GetSize().width, 0.5 / gradient_texture->GetSize().height); SamplerDescriptor sampler_desc; sampler_desc.min_filter = MinMagFilter::kLinear; sampler_desc.mag_filter = MinMagFilter::kLinear; pass.SetCommandLabel("RadialGradientFill"); FS::BindFragInfo( pass, renderer.GetTransientsBuffer().EmplaceUniform(frag_info)); FS::BindTextureSampler( pass, gradient_texture, renderer.GetContext()->GetSamplerLibrary()->GetSampler( sampler_desc)); return true; }); } bool RadialGradientContents::ApplyColorFilter( const ColorFilterProc& color_filter_proc) { for (Color& color : colors_) { color = color_filter_proc(color); } decal_border_color_ = color_filter_proc(decal_border_color_); return true; } } // namespace impeller
engine/impeller/entity/contents/radial_gradient_contents.cc/0
{ "file_path": "engine/impeller/entity/contents/radial_gradient_contents.cc", "repo_id": "engine", "token_count": 2356 }
185
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/entity/contents/text_contents.h" #include <cstring> #include <optional> #include <utility> #include "impeller/core/formats.h" #include "impeller/core/sampler_descriptor.h" #include "impeller/entity/contents/content_context.h" #include "impeller/entity/entity.h" #include "impeller/renderer/render_pass.h" #include "impeller/typographer/glyph_atlas.h" #include "impeller/typographer/lazy_glyph_atlas.h" namespace impeller { TextContents::TextContents() = default; TextContents::~TextContents() = default; void TextContents::SetTextFrame(const std::shared_ptr<TextFrame>& frame) { frame_ = frame; } void TextContents::SetColor(Color color) { color_ = color; } Color TextContents::GetColor() const { return color_.WithAlpha(color_.alpha * inherited_opacity_); } bool TextContents::CanInheritOpacity(const Entity& entity) const { return !frame_->MaybeHasOverlapping(); } void TextContents::SetInheritedOpacity(Scalar opacity) { inherited_opacity_ = opacity; } void TextContents::SetOffset(Vector2 offset) { offset_ = offset; } void TextContents::SetForceTextColor(bool value) { force_text_color_ = value; } std::optional<Rect> TextContents::GetCoverage(const Entity& entity) const { return frame_->GetBounds().TransformBounds(entity.GetTransform()); } void TextContents::PopulateGlyphAtlas( const std::shared_ptr<LazyGlyphAtlas>& lazy_glyph_atlas, Scalar scale) { lazy_glyph_atlas->AddTextFrame(*frame_, scale); scale_ = scale; } bool TextContents::Render(const ContentContext& renderer, const Entity& entity, RenderPass& pass) const { auto color = GetColor(); if (color.IsTransparent()) { return true; } auto type = frame_->GetAtlasType(); const std::shared_ptr<GlyphAtlas>& atlas = renderer.GetLazyGlyphAtlas()->CreateOrGetGlyphAtlas( *renderer.GetContext(), type); if (!atlas || !atlas->IsValid()) { VALIDATION_LOG << "Cannot render glyphs without prepared atlas."; return false; } // Information shared by all glyph draw calls. pass.SetCommandLabel("TextFrame"); auto opts = OptionsFromPassAndEntity(pass, entity); opts.primitive_type = PrimitiveType::kTriangle; if (type == GlyphAtlas::Type::kAlphaBitmap) { pass.SetPipeline(renderer.GetGlyphAtlasPipeline(opts)); } else { pass.SetPipeline(renderer.GetGlyphAtlasColorPipeline(opts)); } pass.SetStencilReference(entity.GetClipDepth()); using VS = GlyphAtlasPipeline::VertexShader; using FS = GlyphAtlasPipeline::FragmentShader; // Common vertex uniforms for all glyphs. VS::FrameInfo frame_info; frame_info.depth = entity.GetShaderClipDepth(); frame_info.mvp = pass.GetOrthographicTransform(); frame_info.atlas_size = Vector2{static_cast<Scalar>(atlas->GetTexture()->GetSize().width), static_cast<Scalar>(atlas->GetTexture()->GetSize().height)}; frame_info.offset = offset_; frame_info.is_translation_scale = entity.GetTransform().IsTranslationScaleOnly(); frame_info.entity_transform = entity.GetTransform(); frame_info.text_color = ToVector(color.Premultiply()); VS::BindFrameInfo(pass, renderer.GetTransientsBuffer().EmplaceUniform(frame_info)); if (type == GlyphAtlas::Type::kColorBitmap) { using FSS = GlyphAtlasColorPipeline::FragmentShader; FSS::FragInfo frag_info; frag_info.use_text_color = force_text_color_ ? 
1.0 : 0.0; FSS::BindFragInfo(pass, renderer.GetTransientsBuffer().EmplaceUniform(frag_info)); } SamplerDescriptor sampler_desc; if (frame_info.is_translation_scale) { sampler_desc.min_filter = MinMagFilter::kNearest; sampler_desc.mag_filter = MinMagFilter::kNearest; } else { // Currently, we only propagate the scale of the transform to the atlas // renderer, so if the transform has more than just a translation, we turn // on linear sampling to prevent crunchiness caused by the pixel grid not // being perfectly aligned. // The downside is that this slightly over-blurs rotated/skewed text. sampler_desc.min_filter = MinMagFilter::kLinear; sampler_desc.mag_filter = MinMagFilter::kLinear; } sampler_desc.mip_filter = MipFilter::kNearest; FS::BindGlyphAtlasSampler( pass, // command atlas->GetTexture(), // texture renderer.GetContext()->GetSamplerLibrary()->GetSampler( sampler_desc) // sampler ); // Common vertex information for all glyphs. // All glyphs are given the same vertex information in the form of a // unit-sized quad. The size of the glyph is specified in per instance data // and the vertex shader uses this to size the glyph correctly. The // interpolated vertex information is also used in the fragment shader to // sample from the glyph atlas. constexpr std::array<Point, 6> unit_points = {Point{0, 0}, Point{1, 0}, Point{0, 1}, Point{1, 0}, Point{0, 1}, Point{1, 1}}; auto& host_buffer = renderer.GetTransientsBuffer(); size_t vertex_count = 0; for (const auto& run : frame_->GetRuns()) { vertex_count += run.GetGlyphPositions().size(); } vertex_count *= 6; auto buffer_view = host_buffer.Emplace( vertex_count * sizeof(VS::PerVertexData), alignof(VS::PerVertexData), [&](uint8_t* contents) { VS::PerVertexData vtx; VS::PerVertexData* vtx_contents = reinterpret_cast<VS::PerVertexData*>(contents); for (const TextRun& run : frame_->GetRuns()) { const Font& font = run.GetFont(); Scalar rounded_scale = TextFrame::RoundScaledFontSize( scale_, font.GetMetrics().point_size); const FontGlyphAtlas* font_atlas = atlas->GetFontGlyphAtlas(font, rounded_scale); if (!font_atlas) { VALIDATION_LOG << "Could not find font in the atlas."; continue; } for (const TextRun::GlyphPosition& glyph_position : run.GetGlyphPositions()) { std::optional<Rect> maybe_atlas_glyph_bounds = font_atlas->FindGlyphBounds(glyph_position.glyph); if (!maybe_atlas_glyph_bounds.has_value()) { VALIDATION_LOG << "Could not find glyph position in the atlas."; continue; } const Rect& atlas_glyph_bounds = maybe_atlas_glyph_bounds.value(); vtx.atlas_glyph_bounds = Vector4(atlas_glyph_bounds.GetXYWH()); vtx.glyph_bounds = Vector4(glyph_position.glyph.bounds.GetXYWH()); vtx.glyph_position = glyph_position.position; for (const Point& point : unit_points) { vtx.unit_position = point; std::memcpy(vtx_contents++, &vtx, sizeof(VS::PerVertexData)); } } } }); pass.SetVertexBuffer({ .vertex_buffer = std::move(buffer_view), .index_buffer = {}, .vertex_count = vertex_count, .index_type = IndexType::kNone, }); return pass.Draw().ok(); } } // namespace impeller
engine/impeller/entity/contents/text_contents.cc/0
{ "file_path": "engine/impeller/entity/contents/text_contents.cc", "repo_id": "engine", "token_count": 2936 }
186
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/entity/entity_pass_target.h" #include "impeller/base/validation.h" #include "impeller/core/formats.h" #include "impeller/core/texture.h" namespace impeller { EntityPassTarget::EntityPassTarget(const RenderTarget& render_target, bool supports_read_from_resolve, bool supports_implicit_msaa) : target_(render_target), supports_read_from_resolve_(supports_read_from_resolve), supports_implicit_msaa_(supports_implicit_msaa) {} std::shared_ptr<Texture> EntityPassTarget::Flip(Allocator& allocator) { auto color0 = target_.GetColorAttachments().find(0)->second; if (!color0.resolve_texture) { VALIDATION_LOG << "EntityPassTarget Flip should never be called for a " "non-MSAA target."; // ...because there is never a circumstance where doing so would be // necessary. Unlike MSAA passes, non-MSAA passes can be trivially loaded // with `LoadAction::kLoad`. return color0.texture; } if (supports_read_from_resolve_) { // Just return the current resolve texture, which is safe to read in the // next render pass that'll resolve to `target_`. // // Note that this can only be done when MSAA is being used. return color0.resolve_texture; } if (!secondary_color_texture_) { // The second texture is allocated lazily to avoid unused allocations. TextureDescriptor new_descriptor = color0.resolve_texture->GetTextureDescriptor(); secondary_color_texture_ = allocator.CreateTexture(new_descriptor); if (!secondary_color_texture_) { return nullptr; } } // If the color0 resolve texture is the same as the texture, then we're // running on the GLES backend with implicit resolve. if (supports_implicit_msaa_) { auto new_secondary = color0.resolve_texture; color0.resolve_texture = secondary_color_texture_; color0.texture = secondary_color_texture_; secondary_color_texture_ = new_secondary; } else { std::swap(color0.resolve_texture, secondary_color_texture_); } target_.SetColorAttachment(color0, 0); // Return the previous backdrop texture, which is safe to read in the next // render pass that attaches `target_`. return secondary_color_texture_; } const RenderTarget& EntityPassTarget::GetRenderTarget() const { return target_; } bool EntityPassTarget::IsValid() const { return target_.IsValid(); } } // namespace impeller
engine/impeller/entity/entity_pass_target.cc/0
{ "file_path": "engine/impeller/entity/entity_pass_target.cc", "repo_id": "engine", "token_count": 919 }
187
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/testing/testing.h" #include "impeller/entity/geometry/geometry.h" #include "impeller/entity/geometry/stroke_path_geometry.h" #include "impeller/geometry/geometry_asserts.h" #include "impeller/geometry/path_builder.h" inline ::testing::AssertionResult SolidVerticesNear( std::vector<impeller::SolidFillVertexShader::PerVertexData> a, std::vector<impeller::SolidFillVertexShader::PerVertexData> b) { if (a.size() != b.size()) { return ::testing::AssertionFailure() << "Colors length does not match"; } for (auto i = 0u; i < b.size(); i++) { if (!PointNear(a[i].position, b[i].position)) { return ::testing::AssertionFailure() << "Positions are not equal."; } } return ::testing::AssertionSuccess(); } inline ::testing::AssertionResult TextureVerticesNear( std::vector<impeller::TextureFillVertexShader::PerVertexData> a, std::vector<impeller::TextureFillVertexShader::PerVertexData> b) { if (a.size() != b.size()) { return ::testing::AssertionFailure() << "Colors length does not match"; } for (auto i = 0u; i < b.size(); i++) { if (!PointNear(a[i].position, b[i].position)) { return ::testing::AssertionFailure() << "Positions are not equal."; } if (!PointNear(a[i].texture_coords, b[i].texture_coords)) { return ::testing::AssertionFailure() << "Texture coords are not equal."; } } return ::testing::AssertionSuccess(); } #define EXPECT_SOLID_VERTICES_NEAR(a, b) \ EXPECT_PRED2(&::SolidVerticesNear, a, b) #define EXPECT_TEXTURE_VERTICES_NEAR(a, b) \ EXPECT_PRED2(&::TextureVerticesNear, a, b) namespace impeller { class ImpellerEntityUnitTestAccessor { public: static std::vector<SolidFillVertexShader::PerVertexData> GenerateSolidStrokeVertices(const Path::Polyline& polyline, Scalar stroke_width, Scalar miter_limit, Join stroke_join, Cap stroke_cap, Scalar scale) { return StrokePathGeometry::GenerateSolidStrokeVertices( polyline, stroke_width, miter_limit, stroke_join, stroke_cap, scale); } static std::vector<TextureFillVertexShader::PerVertexData> GenerateSolidStrokeVerticesUV(const Path::Polyline& polyline, Scalar stroke_width, Scalar miter_limit, Join stroke_join, Cap stroke_cap, Scalar scale, Point texture_origin, Size texture_size, const Matrix& effect_transform) { return StrokePathGeometry::GenerateSolidStrokeVerticesUV( polyline, stroke_width, miter_limit, stroke_join, stroke_cap, scale, texture_origin, texture_size, effect_transform); } }; namespace testing { TEST(EntityGeometryTest, RectGeometryCoversArea) { auto geometry = Geometry::MakeRect(Rect::MakeLTRB(0, 0, 100, 100)); ASSERT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(0, 0, 100, 100))); ASSERT_FALSE(geometry->CoversArea({}, Rect::MakeLTRB(-1, 0, 100, 100))); ASSERT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(1, 1, 100, 100))); ASSERT_TRUE(geometry->CoversArea({}, Rect())); } TEST(EntityGeometryTest, FillPathGeometryCoversArea) { auto path = PathBuilder{}.AddRect(Rect::MakeLTRB(0, 0, 100, 100)).TakePath(); auto geometry = Geometry::MakeFillPath( path, /* inner rect */ Rect::MakeLTRB(0, 0, 100, 100)); ASSERT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(0, 0, 100, 100))); ASSERT_FALSE(geometry->CoversArea({}, Rect::MakeLTRB(-1, 0, 100, 100))); ASSERT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(1, 1, 100, 100))); ASSERT_TRUE(geometry->CoversArea({}, Rect())); } TEST(EntityGeometryTest, FillPathGeometryCoversAreaNoInnerRect) { auto path = 
PathBuilder{}.AddRect(Rect::MakeLTRB(0, 0, 100, 100)).TakePath(); auto geometry = Geometry::MakeFillPath(path); ASSERT_FALSE(geometry->CoversArea({}, Rect::MakeLTRB(0, 0, 100, 100))); ASSERT_FALSE(geometry->CoversArea({}, Rect::MakeLTRB(-1, 0, 100, 100))); ASSERT_FALSE(geometry->CoversArea({}, Rect::MakeLTRB(1, 1, 100, 100))); ASSERT_FALSE(geometry->CoversArea({}, Rect())); } TEST(EntityGeometryTest, LineGeometryCoverage) { { auto geometry = Geometry::MakeLine({10, 10}, {20, 10}, 2, Cap::kButt); EXPECT_EQ(geometry->GetCoverage({}), Rect::MakeLTRB(10, 9, 20, 11)); EXPECT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(10, 9, 20, 11))); } { auto geometry = Geometry::MakeLine({10, 10}, {20, 10}, 2, Cap::kSquare); EXPECT_EQ(geometry->GetCoverage({}), Rect::MakeLTRB(9, 9, 21, 11)); EXPECT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(9, 9, 21, 11))); } { auto geometry = Geometry::MakeLine({10, 10}, {10, 20}, 2, Cap::kButt); EXPECT_EQ(geometry->GetCoverage({}), Rect::MakeLTRB(9, 10, 11, 20)); EXPECT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(9, 10, 11, 20))); } { auto geometry = Geometry::MakeLine({10, 10}, {10, 20}, 2, Cap::kSquare); EXPECT_EQ(geometry->GetCoverage({}), Rect::MakeLTRB(9, 9, 11, 21)); EXPECT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(9, 9, 11, 21))); } } TEST(EntityGeometryTest, RoundRectGeometryCoversArea) { auto geometry = Geometry::MakeRoundRect(Rect::MakeLTRB(0, 0, 100, 100), Size(20, 20)); EXPECT_FALSE(geometry->CoversArea({}, Rect::MakeLTRB(15, 15, 85, 85))); EXPECT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(20, 20, 80, 80))); EXPECT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(30, 1, 70, 99))); EXPECT_TRUE(geometry->CoversArea({}, Rect::MakeLTRB(1, 30, 99, 70))); } TEST(EntityGeometryTest, StrokePathGeometryTransformOfLine) { auto path = PathBuilder().AddLine(Point(100, 100), Point(200, 100)).TakePath(); auto points = std::make_unique<std::vector<Point>>(); auto polyline = path.CreatePolyline(1.0f, std::move(points), [&points](Path::Polyline::PointBufferPtr reclaimed) { points = std::move(reclaimed); }); auto vertices = ImpellerEntityUnitTestAccessor::GenerateSolidStrokeVertices( polyline, 10.0f, 10.0f, Join::kBevel, Cap::kButt, 1.0); std::vector<SolidFillVertexShader::PerVertexData> expected = { {.position = Point(100.0f, 105.0f)}, // {.position = Point(100.0f, 95.0f)}, // {.position = Point(100.0f, 105.0f)}, // {.position = Point(100.0f, 95.0f)}, // {.position = Point(200.0f, 105.0f)}, // {.position = Point(200.0f, 95.0f)}, // {.position = Point(200.0f, 105.0f)}, // {.position = Point(200.0f, 95.0f)}, // }; EXPECT_SOLID_VERTICES_NEAR(vertices, expected); { auto uv_vertices = ImpellerEntityUnitTestAccessor::GenerateSolidStrokeVerticesUV( polyline, 10.0f, 10.0f, Join::kBevel, Cap::kButt, 1.0, // Point(50.0f, 40.0f), Size(20.0f, 40.0f), Matrix()); // uvx = (x - 50) / 20 // uvy = (y - 40) / 40 auto uv = [](const Point& p) { return Point((p.x - 50.0f) / 20.0f, // (p.y - 40.0f) / 40.0f); }; std::vector<TextureFillVertexShader::PerVertexData> uv_expected; for (size_t i = 0; i < expected.size(); i++) { auto p = expected[i].position; uv_expected.push_back({.position = p, .texture_coords = uv(p)}); } EXPECT_TEXTURE_VERTICES_NEAR(uv_vertices, uv_expected); } { auto uv_vertices = ImpellerEntityUnitTestAccessor::GenerateSolidStrokeVerticesUV( polyline, 10.0f, 10.0f, Join::kBevel, Cap::kButt, 1.0, // Point(50.0f, 40.0f), Size(20.0f, 40.0f), Matrix::MakeScale({8.0f, 4.0f, 1.0f})); // uvx = ((x * 8) - 50) / 20 // uvy = ((y * 4) - 40) / 40 auto uv = [](const Point& p) { return 
Point(((p.x * 8.0f) - 50.0f) / 20.0f, ((p.y * 4.0f) - 40.0f) / 40.0f); }; std::vector<TextureFillVertexShader::PerVertexData> uv_expected; for (size_t i = 0; i < expected.size(); i++) { auto p = expected[i].position; uv_expected.push_back({.position = p, .texture_coords = uv(p)}); } EXPECT_TEXTURE_VERTICES_NEAR(uv_vertices, uv_expected); } { auto uv_vertices = ImpellerEntityUnitTestAccessor::GenerateSolidStrokeVerticesUV( polyline, 10.0f, 10.0f, Join::kBevel, Cap::kButt, 1.0, // Point(50.0f, 40.0f), Size(20.0f, 40.0f), Matrix::MakeTranslation({8.0f, 4.0f})); // uvx = ((x + 8) - 50) / 20 // uvy = ((y + 4) - 40) / 40 auto uv = [](const Point& p) { return Point(((p.x + 8.0f) - 50.0f) / 20.0f, ((p.y + 4.0f) - 40.0f) / 40.0f); }; std::vector<TextureFillVertexShader::PerVertexData> uv_expected; for (size_t i = 0; i < expected.size(); i++) { auto p = expected[i].position; uv_expected.push_back({.position = p, .texture_coords = uv(p)}); } EXPECT_TEXTURE_VERTICES_NEAR(uv_vertices, uv_expected); } } TEST(EntityGeometryTest, GeometryResultHasReasonableDefaults) { GeometryResult result; EXPECT_EQ(result.type, PrimitiveType::kTriangleStrip); EXPECT_EQ(result.transform, Matrix()); EXPECT_EQ(result.mode, GeometryResult::Mode::kNormal); } } // namespace testing } // namespace impeller
engine/impeller/entity/geometry/geometry_unittests.cc/0
{ "file_path": "engine/impeller/entity/geometry/geometry_unittests.cc", "repo_id": "engine", "token_count": 4329 }
188
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_ENTITY_RENDER_TARGET_CACHE_H_ #define FLUTTER_IMPELLER_ENTITY_RENDER_TARGET_CACHE_H_ #include "impeller/renderer/render_target.h" namespace impeller { /// @brief An implementation of the [RenderTargetAllocator] that caches all /// allocated texture data for one frame. /// /// Any textures unused after a frame are immediately discarded. class RenderTargetCache : public RenderTargetAllocator { public: explicit RenderTargetCache(std::shared_ptr<Allocator> allocator); ~RenderTargetCache() = default; // |RenderTargetAllocator| void Start() override; // |RenderTargetAllocator| void End() override; RenderTarget CreateOffscreen( const Context& context, ISize size, int mip_count, const std::string& label = "Offscreen", RenderTarget::AttachmentConfig color_attachment_config = RenderTarget::kDefaultColorAttachmentConfig, std::optional<RenderTarget::AttachmentConfig> stencil_attachment_config = RenderTarget::kDefaultStencilAttachmentConfig, const std::shared_ptr<Texture>& existing_color_texture = nullptr, const std::shared_ptr<Texture>& existing_depth_stencil_texture = nullptr) override; RenderTarget CreateOffscreenMSAA( const Context& context, ISize size, int mip_count, const std::string& label = "Offscreen MSAA", RenderTarget::AttachmentConfigMSAA color_attachment_config = RenderTarget::kDefaultColorAttachmentConfigMSAA, std::optional<RenderTarget::AttachmentConfig> stencil_attachment_config = RenderTarget::kDefaultStencilAttachmentConfig, const std::shared_ptr<Texture>& existing_color_msaa_texture = nullptr, const std::shared_ptr<Texture>& existing_color_resolve_texture = nullptr, const std::shared_ptr<Texture>& existing_depth_stencil_texture = nullptr) override; // visible for testing. size_t CachedTextureCount() const; private: struct RenderTargetData { bool used_this_frame; RenderTargetConfig config; RenderTarget render_target; }; std::vector<RenderTargetData> render_target_data_; RenderTargetCache(const RenderTargetCache&) = delete; RenderTargetCache& operator=(const RenderTargetCache&) = delete; public: /// Visible for testing. std::vector<RenderTargetData>::const_iterator GetRenderTargetDataBegin() const { return render_target_data_.begin(); } /// Visible for testing. std::vector<RenderTargetData>::const_iterator GetRenderTargetDataEnd() const { return render_target_data_.end(); } }; } // namespace impeller #endif // FLUTTER_IMPELLER_ENTITY_RENDER_TARGET_CACHE_H_
engine/impeller/entity/render_target_cache.h/0
{ "file_path": "engine/impeller/entity/render_target_cache.h", "repo_id": "engine", "token_count": 972 }
189
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. uniform FragInfo { vec2 circle_positions[4]; vec4 colors[4]; } frag_info; in vec2 v_position; out vec4 frag_color; float SphereDistance(vec2 position, float radius) { return length(v_position - position) - radius; } void main() { for (int i = 0; i < 4; i++) { if (SphereDistance(frag_info.circle_positions[i].xy, 20) <= 0) { frag_color = frag_info.colors[i]; return; } } frag_color = vec4(0); }
engine/impeller/fixtures/array.frag/0
{ "file_path": "engine/impeller/fixtures/array.frag", "repo_id": "engine", "token_count": 222 }
190
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. uniform VertInfo { // 128 bytes (alignment = NPOT(largest member)) mat4 mvp; // offset 0 bytes, size 64 bytes vec4 color; // offset 64 bytes, size 16 bytes } vert_info; in vec2 position; out vec4 v_color; void main() { v_color = vert_info.color; gl_Position = vert_info.mvp * vec4(position, 0.0, 1.0); }
engine/impeller/fixtures/flutter_gpu_unlit.vert/0
{ "file_path": "engine/impeller/fixtures/flutter_gpu_unlit.vert", "repo_id": "engine", "token_count": 173 }
191
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. uniform FrameInfo { mat4 mvp; vec2 atlas_size; vec4 text_color; } frame_info; in vec2 unit_vertex; in mat4 glyph_position; // <--- Causes multiple slots to be used and is a failure. in vec2 destination_size; in vec2 source_position; in vec2 source_glyph_size; out vec2 v_unit_vertex; out vec2 v_source_position; out vec2 v_source_glyph_size; out vec2 v_atlas_size; out vec4 v_text_color; void main() { gl_Position = frame_info.mvp * glyph_position * vec4(unit_vertex.x * destination_size.x, unit_vertex.y * destination_size.y, 0.0, 1.0); v_unit_vertex = unit_vertex; v_source_position = source_position; v_source_glyph_size = source_glyph_size; v_atlas_size = frame_info.atlas_size; v_text_color = frame_info.text_color; }
engine/impeller/fixtures/struct_def_bug.vert/0
{ "file_path": "engine/impeller/fixtures/struct_def_bug.vert", "repo_id": "engine", "token_count": 376 }
192
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_GEOMETRY_MATRIX_DECOMPOSITION_H_ #define FLUTTER_IMPELLER_GEOMETRY_MATRIX_DECOMPOSITION_H_ #include "impeller/geometry/quaternion.h" #include "impeller/geometry/scalar.h" #include "impeller/geometry/shear.h" #include "impeller/geometry/vector.h" namespace impeller { struct MatrixDecomposition { Vector3 translation; Vector3 scale; Shear shear; Vector4 perspective; Quaternion rotation; enum class Component { kTranslation = 1 << 0, kScale = 1 << 1, kShear = 1 << 2, kPerspective = 1 << 3, kRotation = 1 << 4, }; uint64_t GetComponentsMask() const; }; } // namespace impeller #endif // FLUTTER_IMPELLER_GEOMETRY_MATRIX_DECOMPOSITION_H_
engine/impeller/geometry/matrix_decomposition.h/0
{ "file_path": "engine/impeller/geometry/matrix_decomposition.h", "repo_id": "engine", "token_count": 335 }
193
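As a small usage sketch of the bitmask declared above: each Component value is a single bit, so the mask returned by GetComponentsMask() can be tested with a bitwise AND. How the MatrixDecomposition itself is produced is outside this snippet and assumed to happen elsewhere; HasRotation is a hypothetical helper.

#include <cstdint>

#include "impeller/geometry/matrix_decomposition.h"

namespace impeller {

bool HasRotation(const MatrixDecomposition& decomposition) {
  const uint64_t mask = decomposition.GetComponentsMask();
  const uint64_t rotation_bit =
      static_cast<uint64_t>(MatrixDecomposition::Component::kRotation);
  return (mask & rotation_bit) != 0u;
}

}  // namespace impeller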
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_GEOMETRY_SATURATED_MATH_H_ #define FLUTTER_IMPELLER_GEOMETRY_SATURATED_MATH_H_ #include <algorithm> #include <limits> #include <type_traits> #include "flutter/fml/logging.h" #include "impeller/geometry/scalar.h" namespace impeller { namespace saturated { // NOLINTBEGIN(readability-identifier-naming) template <typename T> inline constexpr bool is_signed_integral_v = std::is_integral_v<T> && std::is_signed_v<T>; // NOLINTEND(readability-identifier-naming) #define ONLY_ON_SIGNED_INT_RET(Type, Ret) \ template <typename Type> \ constexpr inline std::enable_if_t<is_signed_integral_v<Type>, Ret> #define ONLY_ON_SIGNED_INT(Type) ONLY_ON_SIGNED_INT_RET(Type, Type) #define ONLY_ON_FLOAT_RET(Type, Ret) \ template <typename Type> \ constexpr inline std::enable_if_t<std::is_floating_point_v<Type>, Ret> #define ONLY_ON_FLOAT(Type) ONLY_ON_FLOAT_RET(Type, Type) #define ONLY_ON_FLOAT_TO_SIGNED_INT_RET(FPType, SIType, Ret) \ template <typename FPType, typename SIType> \ constexpr inline std::enable_if_t< \ std::is_floating_point_v<FPType> && is_signed_integral_v<SIType>, Ret> #define ONLY_ON_FLOAT_TO_SIGNED_INT(FPType, SIType) \ ONLY_ON_FLOAT_TO_SIGNED_INT_RET(FPType, SIType, SIType) #define ONLY_ON_DIFFERING_FLOAT_RET(FPType1, FPType2, Ret) \ template <typename FPType1, typename FPType2> \ constexpr inline std::enable_if_t<std::is_floating_point_v<FPType1> && \ std::is_floating_point_v<FPType2> && \ !std::is_same_v<FPType1, FPType2>, \ Ret> #define ONLY_ON_DIFFERING_FLOAT(FPType1, FPType2) \ ONLY_ON_DIFFERING_FLOAT_RET(FPType1, FPType2, FPType2) #define ONLY_ON_SAME_TYPES_RET(Type1, Type2, Ret) \ template <typename Type1, typename Type2> \ constexpr inline std::enable_if_t<std::is_same_v<Type1, Type2>, Ret> #define ONLY_ON_SAME_TYPES(Type1, Type2) \ ONLY_ON_SAME_TYPES_RET(Type1, Type2, Type2) ONLY_ON_SIGNED_INT(SI) Add(SI location, SI distance) { if (location >= 0) { if (distance > std::numeric_limits<SI>::max() - location) { return std::numeric_limits<SI>::max(); } } else if (distance < std::numeric_limits<SI>::min() - location) { return std::numeric_limits<SI>::min(); } return location + distance; } ONLY_ON_FLOAT(FP) Add(FP location, FP distance) { return location + distance; } ONLY_ON_SIGNED_INT(SI) Sub(SI upper, SI lower) { if (upper >= 0) { if (lower < 0 && upper > std::numeric_limits<SI>::max() + lower) { return std::numeric_limits<SI>::max(); } } else if (lower > 0 && upper < std::numeric_limits<SI>::min() + lower) { return std::numeric_limits<SI>::min(); } return upper - lower; } ONLY_ON_FLOAT(FP) Sub(FP upper, FP lower) { return upper - lower; } ONLY_ON_SIGNED_INT_RET(SI, Scalar) AverageScalar(SI a, SI b) { // scalbn has an implementation for ints that converts to double // while adjusting the exponent. return static_cast<Scalar>(std::scalbn(a, -1) + std::scalbn(b, -1)); } ONLY_ON_FLOAT(FP) AverageScalar(FP a, FP b) { // GetCenter might want this to return 0 for a Maximum Rect, but it // will currently produce NaN instead. For the Maximum Rect itself // a 0 would make sense as the center, but for a computed rect that // incidentally ended up with infinities, NaN may be a better choice. // return static_cast<Scalar>(std::scalbn(a, -1) + std::scalbn(b, -1)); // This equation would save an extra scalbn operation but at the cost // of having very large (or very negative) a's and b's overflow to // +/- infinity. 
Scaling first allows finite numbers to be more likely // to have a finite average. // return std::scalbn(a + b, -1); return static_cast<Scalar>(std::scalbn(a, -1) + std::scalbn(b, -1)); } ONLY_ON_SAME_TYPES(T, U) Cast(T v) { return v; } ONLY_ON_FLOAT_TO_SIGNED_INT(FP, SI) Cast(FP v) { if (v <= static_cast<FP>(std::numeric_limits<SI>::min())) { return std::numeric_limits<SI>::min(); } else if (v >= static_cast<FP>(std::numeric_limits<SI>::max())) { return std::numeric_limits<SI>::max(); } return static_cast<SI>(v); } ONLY_ON_DIFFERING_FLOAT(FP1, FP2) Cast(FP1 v) { if (std::isfinite(v)) { // Avoid truncation to inf/-inf. return std::clamp(static_cast<FP2>(v), // std::numeric_limits<FP2>::lowest(), std::numeric_limits<FP2>::max()); } else { return static_cast<FP2>(v); } } #undef ONLY_ON_SAME_TYPES #undef ONLY_ON_SAME_TYPES_RET #undef ONLY_ON_DIFFERING_FLOAT #undef ONLY_ON_DIFFERING_FLOAT_RET #undef ONLY_ON_FLOAT_TO_SIGNED_INT #undef ONLY_ON_FLOAT_TO_SIGNED_INT_RET #undef ONLY_ON_FLOAT #undef ONLY_ON_FLOAT_RET #undef ONLY_ON_SIGNED_INT #undef ONLY_ON_SIGNED_INT_RET } // namespace saturated } // namespace impeller #endif // FLUTTER_IMPELLER_GEOMETRY_SATURATED_MATH_H_
engine/impeller/geometry/saturated_math.h/0
{ "file_path": "engine/impeller/geometry/saturated_math.h", "repo_id": "engine", "token_count": 2299 }
194
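A short, hypothetical illustration of the clamping behaviour this header documents: integer overflow saturates at the numeric limits, and an out-of-range float-to-integer cast clamps instead of invoking undefined behaviour. Only the Add and Cast entry points declared above are used.

#include <cstdint>
#include <cstdio>
#include <limits>

#include "impeller/geometry/saturated_math.h"

int main() {
  const int32_t max32 = std::numeric_limits<int32_t>::max();
  // Overflow saturates instead of wrapping.
  const int32_t sum = impeller::saturated::Add<int32_t>(max32, 1);
  // An out-of-range double clamps to the integer maximum.
  const int32_t clamped = impeller::saturated::Cast<double, int32_t>(1e300);
  std::printf("sum=%d clamped=%d\n", sum, clamped);
  return (sum == max32 && clamped == max32) ? 0 : 1;
}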
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_GEOMETRY_VECTOR_H_ #define FLUTTER_IMPELLER_GEOMETRY_VECTOR_H_ #include <cmath> #include <string> #include "impeller/geometry/color.h" #include "impeller/geometry/point.h" #include "impeller/geometry/scalar.h" #include "impeller/geometry/size.h" namespace impeller { // NOLINTBEGIN(google-explicit-constructor) struct Vector3 { union { struct { Scalar x = 0.0f; Scalar y = 0.0f; Scalar z = 0.0f; }; Scalar e[3]; }; constexpr Vector3(){}; constexpr Vector3(const Color& c) : x(c.red), y(c.green), z(c.blue) {} constexpr Vector3(const Point& p) : x(p.x), y(p.y) {} constexpr Vector3(const Size& s) : x(s.width), y(s.height) {} constexpr Vector3(Scalar x, Scalar y) : x(x), y(y) {} constexpr Vector3(Scalar x, Scalar y, Scalar z) : x(x), y(y), z(z) {} /** * The length (or magnitude of the vector). * * @return the calculated length. */ constexpr Scalar Length() const { return sqrt(x * x + y * y + z * z); } constexpr Vector3 Normalize() const { const auto len = Length(); return {x / len, y / len, z / len}; } constexpr Scalar Dot(const Vector3& other) const { return ((x * other.x) + (y * other.y) + (z * other.z)); } constexpr Vector3 Abs() const { return {std::fabs(x), std::fabs(y), std::fabs(z)}; } constexpr Vector3 Cross(const Vector3& other) const { return { (y * other.z) - (z * other.y), // (z * other.x) - (x * other.z), // (x * other.y) - (y * other.x) // }; } constexpr Vector3 Min(const Vector3& p) const { return {std::min(x, p.x), std::min(y, p.y), std::min(z, p.z)}; } constexpr Vector3 Max(const Vector3& p) const { return {std::max(x, p.x), std::max(y, p.y), std::max(z, p.z)}; } constexpr Vector3 Floor() const { return {std::floor(x), std::floor(y), std::floor(z)}; } constexpr Vector3 Ceil() const { return {std::ceil(x), std::ceil(y), std::ceil(z)}; } constexpr Vector3 Round() const { return {std::round(x), std::round(y), std::round(z)}; } constexpr bool operator==(const Vector3& v) const { return v.x == x && v.y == y && v.z == z; } constexpr bool operator!=(const Vector3& v) const { return v.x != x || v.y != y || v.z != z; } constexpr Vector3 operator+=(const Vector3& p) { x += p.x; y += p.y; z += p.z; return *this; } constexpr Vector3 operator-=(const Vector3& p) { x -= p.x; y -= p.y; z -= p.z; return *this; } constexpr Vector3 operator*=(const Vector3& p) { x *= p.x; y *= p.y; z *= p.z; return *this; } template <class U, class = std::enable_if_t<std::is_arithmetic_v<U>>> constexpr Vector3 operator*=(U scale) { x *= scale; y *= scale; z *= scale; return *this; } constexpr Vector3 operator/=(const Vector3& p) { x /= p.x; y /= p.y; z /= p.z; return *this; } template <class U, class = std::enable_if_t<std::is_arithmetic_v<U>>> constexpr Vector3 operator/=(U scale) { x /= scale; y /= scale; z /= scale; return *this; } constexpr Vector3 operator-() const { return Vector3(-x, -y, -z); } constexpr Vector3 operator+(const Vector3& v) const { return Vector3(x + v.x, y + v.y, z + v.z); } constexpr Vector3 operator-(const Vector3& v) const { return Vector3(x - v.x, y - v.y, z - v.z); } constexpr Vector3 operator+(Scalar s) const { return Vector3(x + s, y + s, z + s); } constexpr Vector3 operator-(Scalar s) const { return Vector3(x - s, y - s, z - s); } constexpr Vector3 operator*(const Vector3& v) const { return Vector3(x * v.x, y * v.y, z * v.z); } template <class U, class = std::enable_if_t<std::is_arithmetic_v<U>>> 
constexpr Vector3 operator*(U scale) const { return Vector3(x * scale, y * scale, z * scale); } constexpr Vector3 operator/(const Vector3& v) const { return Vector3(x / v.x, y / v.y, z / v.z); } template <class U, class = std::enable_if_t<std::is_arithmetic_v<U>>> constexpr Vector3 operator/(U scale) const { return Vector3(x / scale, y / scale, z / scale); } constexpr Vector3 Lerp(const Vector3& v, Scalar t) const { return *this + (v - *this) * t; } /** * Make a linear combination of two vectors and return the result. * * @param a the first vector. * @param aScale the scale to use for the first vector. * @param b the second vector. * @param bScale the scale to use for the second vector. * * @return the combined vector. */ static constexpr Vector3 Combine(const Vector3& a, Scalar aScale, const Vector3& b, Scalar bScale) { return { aScale * a.x + bScale * b.x, // aScale * a.y + bScale * b.y, // aScale * a.z + bScale * b.z, // }; } std::string ToString() const; }; // RHS algebraic operations with arithmetic types. template <class U, class = std::enable_if_t<std::is_arithmetic_v<U>>> constexpr Vector3 operator*(U s, const Vector3& p) { return p * s; } template <class U, class = std::enable_if_t<std::is_arithmetic_v<U>>> constexpr Vector3 operator+(U s, const Vector3& p) { return p + s; } template <class U, class = std::enable_if_t<std::is_arithmetic_v<U>>> constexpr Vector3 operator-(U s, const Vector3& p) { return -p + s; } template <class U, class = std::enable_if_t<std::is_arithmetic_v<U>>> constexpr Vector3 operator/(U s, const Vector3& p) { return { static_cast<Scalar>(s) / p.x, static_cast<Scalar>(s) / p.y, static_cast<Scalar>(s) / p.z, }; } struct Vector4 { union { struct { Scalar x = 0.0f; Scalar y = 0.0f; Scalar z = 0.0f; Scalar w = 1.0f; }; Scalar e[4]; }; constexpr Vector4() {} constexpr Vector4(const Color& c) : x(c.red), y(c.green), z(c.blue), w(c.alpha) {} constexpr Vector4(Scalar x, Scalar y, Scalar z, Scalar w) : x(x), y(y), z(z), w(w) {} constexpr Vector4(const Vector3& v) : x(v.x), y(v.y), z(v.z) {} constexpr Vector4(const Point& p) : x(p.x), y(p.y) {} constexpr Vector4(std::array<Scalar, 4> values) : x(values[0]), y(values[1]), z(values[2]), w(values[3]) {} Vector4 Normalize() const { const Scalar inverse = 1.0f / sqrt(x * x + y * y + z * z + w * w); return Vector4(x * inverse, y * inverse, z * inverse, w * inverse); } constexpr bool operator==(const Vector4& v) const { return (x == v.x) && (y == v.y) && (z == v.z) && (w == v.w); } constexpr bool operator!=(const Vector4& v) const { return (x != v.x) || (y != v.y) || (z != v.z) || (w != v.w); } constexpr Vector4 operator+(const Vector4& v) const { return Vector4(x + v.x, y + v.y, z + v.z, w + v.w); } constexpr Vector4 operator-(const Vector4& v) const { return Vector4(x - v.x, y - v.y, z - v.z, w - v.w); } constexpr Vector4 operator*(Scalar f) const { return Vector4(x * f, y * f, z * f, w * f); } constexpr Vector4 operator*(const Vector4& v) const { return Vector4(x * v.x, y * v.y, z * v.z, w * v.w); } constexpr Vector4 Min(const Vector4& p) const { return {std::min(x, p.x), std::min(y, p.y), std::min(z, p.z), std::min(w, p.w)}; } constexpr Vector4 Max(const Vector4& p) const { return {std::max(x, p.x), std::max(y, p.y), std::max(z, p.z), std::max(w, p.w)}; } constexpr Vector4 Floor() const { return {std::floor(x), std::floor(y), std::floor(z), std::floor(w)}; } constexpr Vector4 Ceil() const { return {std::ceil(x), std::ceil(y), std::ceil(z), std::ceil(w)}; } constexpr Vector4 Round() const { return {std::round(x), 
std::round(y), std::round(z), std::round(w)}; } constexpr Vector4 Lerp(const Vector4& v, Scalar t) const { return *this + (v - *this) * t; } std::string ToString() const; }; static_assert(sizeof(Vector3) == 3 * sizeof(Scalar)); static_assert(sizeof(Vector4) == 4 * sizeof(Scalar)); } // namespace impeller namespace std { inline std::ostream& operator<<(std::ostream& out, const impeller::Vector3& p) { out << "(" << p.x << ", " << p.y << ", " << p.z << ")"; return out; } inline std::ostream& operator<<(std::ostream& out, const impeller::Vector4& p) { out << "(" << p.x << ", " << p.y << ", " << p.z << ", " << p.w << ")"; return out; } // NOLINTEND(google-explicit-constructor) } // namespace std #endif // FLUTTER_IMPELLER_GEOMETRY_VECTOR_H_
engine/impeller/geometry/vector.h/0
{ "file_path": "engine/impeller/geometry/vector.h", "repo_id": "engine", "token_count": 3782 }
195
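Vector3::Combine above is documented as a linear combination of two scaled vectors; the hypothetical check below shows it agrees with writing the same expression through operator* and operator+.

#include "impeller/geometry/vector.h"

namespace impeller {

bool CombineMatchesOperators() {
  const Vector3 a(1.0f, 2.0f, 3.0f);
  const Vector3 b(4.0f, 5.0f, 6.0f);
  const Vector3 combined = Vector3::Combine(a, 2.0f, b, -1.0f);
  const Vector3 manual = a * 2.0f + b * -1.0f;
  return combined == manual;  // (-2, -1, 0) either way
}

}  // namespace impeller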
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_GOLDEN_TESTS_VULKAN_SCREENSHOTTER_H_ #define FLUTTER_IMPELLER_GOLDEN_TESTS_VULKAN_SCREENSHOTTER_H_ #include "flutter/fml/macros.h" #include "flutter/impeller/aiks/picture.h" #include "flutter/impeller/golden_tests/metal_screenshot.h" #include "flutter/impeller/golden_tests/screenshotter.h" #include "flutter/impeller/playground/playground_impl.h" namespace impeller { namespace testing { /// Converts `Picture`s and `DisplayList`s to `MetalScreenshot`s with the /// playground backend. class VulkanScreenshotter : public Screenshotter { public: explicit VulkanScreenshotter( const std::unique_ptr<PlaygroundImpl>& playground); std::unique_ptr<Screenshot> MakeScreenshot( AiksContext& aiks_context, const Picture& picture, const ISize& size = {300, 300}, bool scale_content = true) override; PlaygroundImpl& GetPlayground() override { return *playground_; } private: const std::unique_ptr<PlaygroundImpl>& playground_; }; } // namespace testing } // namespace impeller #endif // FLUTTER_IMPELLER_GOLDEN_TESTS_VULKAN_SCREENSHOTTER_H_
engine/impeller/golden_tests/vulkan_screenshotter.h/0
{ "file_path": "engine/impeller/golden_tests/vulkan_screenshotter.h", "repo_id": "engine", "token_count": 456 }
196
# Copyright 2013 The Flutter Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import("//flutter/impeller/tools/impeller.gni") impeller_component("image") { testonly = true public = [ "compressed_image.h", "decompressed_image.h", ] sources = [ "compressed_image.cc", "decompressed_image.cc", ] public_deps = [ "../../base", "../../geometry", ] deps = [ "//flutter/fml" ] } impeller_component("image_skia_backend") { testonly = true public = [ "backends/skia/compressed_image_skia.h" ] sources = [ "backends/skia/compressed_image_skia.cc" ] public_deps = [ ":image", "../../base", "../../geometry", ] deps = [ "//flutter/fml", "//flutter/skia", ] }
engine/impeller/playground/image/BUILD.gn/0
{ "file_path": "engine/impeller/playground/image/BUILD.gn", "repo_id": "engine", "token_count": 332 }
197
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_PLAYGROUND_PLAYGROUND_IMPL_H_ #define FLUTTER_IMPELLER_PLAYGROUND_PLAYGROUND_IMPL_H_ #include <memory> #include "flutter/fml/macros.h" #include "impeller/playground/playground.h" #include "impeller/playground/switches.h" #include "impeller/renderer/context.h" #include "impeller/renderer/surface.h" namespace impeller { class PlaygroundImpl { public: static std::unique_ptr<PlaygroundImpl> Create(PlaygroundBackend backend, PlaygroundSwitches switches); virtual ~PlaygroundImpl(); using WindowHandle = void*; virtual WindowHandle GetWindowHandle() const = 0; virtual std::shared_ptr<Context> GetContext() const = 0; virtual std::unique_ptr<Surface> AcquireSurfaceFrame( std::shared_ptr<Context> context) = 0; Vector2 GetContentScale() const; virtual fml::Status SetCapabilities( const std::shared_ptr<Capabilities>& capabilities) = 0; protected: const PlaygroundSwitches switches_; explicit PlaygroundImpl(PlaygroundSwitches switches); private: PlaygroundImpl(const PlaygroundImpl&) = delete; PlaygroundImpl& operator=(const PlaygroundImpl&) = delete; }; } // namespace impeller #endif // FLUTTER_IMPELLER_PLAYGROUND_PLAYGROUND_IMPL_H_
engine/impeller/playground/playground_impl.h/0
{ "file_path": "engine/impeller/playground/playground_impl.h", "repo_id": "engine", "token_count": 491 }
198
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/renderer/backend/gles/buffer_bindings_gles.h" #include <cstring> #include <vector> #include "impeller/base/validation.h" #include "impeller/core/shader_types.h" #include "impeller/renderer/backend/gles/device_buffer_gles.h" #include "impeller/renderer/backend/gles/formats_gles.h" #include "impeller/renderer/backend/gles/sampler_gles.h" #include "impeller/renderer/backend/gles/texture_gles.h" namespace impeller { // This prefix is used in the names of inputs generated by ANGLE's framebuffer // fetch emulation. static constexpr std::string_view kAngleInputAttachmentPrefix = "ANGLEInputAttachment"; BufferBindingsGLES::BufferBindingsGLES() = default; BufferBindingsGLES::~BufferBindingsGLES() = default; bool BufferBindingsGLES::RegisterVertexStageInput( const ProcTableGLES& gl, const std::vector<ShaderStageIOSlot>& p_inputs, const std::vector<ShaderStageBufferLayout>& layouts) { std::vector<VertexAttribPointer> vertex_attrib_arrays; for (auto i = 0u; i < p_inputs.size(); i++) { const auto& input = p_inputs[i]; const auto& layout = layouts[input.binding]; VertexAttribPointer attrib; attrib.index = input.location; // Component counts must be 1, 2, 3 or 4. Do that validation now. if (input.vec_size < 1u || input.vec_size > 4u) { return false; } attrib.size = input.vec_size; auto type = ToVertexAttribType(input.type); if (!type.has_value()) { return false; } attrib.type = type.value(); attrib.normalized = GL_FALSE; attrib.offset = input.offset; attrib.stride = layout.stride; vertex_attrib_arrays.emplace_back(attrib); } vertex_attrib_arrays_ = std::move(vertex_attrib_arrays); return true; } static std::string NormalizeUniformKey(const std::string& key) { std::string result; result.reserve(key.length()); for (char ch : key) { if (ch != '_') { result.push_back(toupper(ch)); } } return result; } static std::string CreateUniformMemberKey(const std::string& struct_name, const std::string& member, bool is_array) { std::string result; result.reserve(struct_name.length() + member.length() + (is_array ? 4 : 1)); result += struct_name; if (!member.empty()) { result += '.'; result += member; } if (is_array) { result += "[0]"; } return NormalizeUniformKey(result); } static std::string CreateUniformMemberKey( const std::string& non_struct_member) { return NormalizeUniformKey(non_struct_member); } bool BufferBindingsGLES::ReadUniformsBindings(const ProcTableGLES& gl, GLuint program) { if (!gl.IsProgram(program)) { return false; } GLint max_name_size = 0; gl.GetProgramiv(program, GL_ACTIVE_UNIFORM_MAX_LENGTH, &max_name_size); GLint uniform_count = 0; gl.GetProgramiv(program, GL_ACTIVE_UNIFORMS, &uniform_count); // Query the Program for all active uniform locations, and // record this via normalized key. for (GLint i = 0; i < uniform_count; i++) { std::vector<GLchar> name; name.resize(max_name_size); GLsizei written_count = 0u; GLint uniform_var_size = 0u; GLenum uniform_type = GL_FLOAT; // Note: Active uniforms are defined as uniforms that may have an impact on // the output of the shader. Drivers are allowed to (and often do) // optimize out unused uniforms. gl.GetActiveUniform(program, // program i, // index max_name_size, // buffer_size &written_count, // length &uniform_var_size, // size &uniform_type, // type name.data() // name ); // Skip unrecognized variables generated by ANGLE. 
if (gl.GetCapabilities()->IsANGLE()) { if (written_count >= static_cast<GLsizei>(kAngleInputAttachmentPrefix.length()) && std::string_view(name.data(), kAngleInputAttachmentPrefix.length()) == kAngleInputAttachmentPrefix) { continue; } } auto location = gl.GetUniformLocation(program, name.data()); if (location == -1) { VALIDATION_LOG << "Could not query the location of an active uniform."; return false; } if (written_count <= 0) { VALIDATION_LOG << "Uniform name could not be read for active uniform."; return false; } uniform_locations_[NormalizeUniformKey(std::string{ name.data(), static_cast<size_t>(written_count)})] = location; } return true; } bool BufferBindingsGLES::BindVertexAttributes(const ProcTableGLES& gl, size_t vertex_offset) const { for (const auto& array : vertex_attrib_arrays_) { gl.EnableVertexAttribArray(array.index); gl.VertexAttribPointer(array.index, // index array.size, // size (must be 1, 2, 3, or 4) array.type, // type array.normalized, // normalized array.stride, // stride reinterpret_cast<const GLvoid*>(static_cast<GLsizei>( vertex_offset + array.offset)) // pointer ); } return true; } bool BufferBindingsGLES::BindUniformData(const ProcTableGLES& gl, Allocator& transients_allocator, const Bindings& vertex_bindings, const Bindings& fragment_bindings) { for (const auto& buffer : vertex_bindings.buffers) { if (!BindUniformBuffer(gl, transients_allocator, buffer.view)) { return false; } } for (const auto& buffer : fragment_bindings.buffers) { if (!BindUniformBuffer(gl, transients_allocator, buffer.view)) { return false; } } std::optional<size_t> next_unit_index = BindTextures(gl, vertex_bindings, ShaderStage::kVertex); if (!next_unit_index.has_value()) { return false; } if (!BindTextures(gl, fragment_bindings, ShaderStage::kFragment, *next_unit_index) .has_value()) { return false; } return true; } bool BufferBindingsGLES::UnbindVertexAttributes(const ProcTableGLES& gl) const { for (const auto& array : vertex_attrib_arrays_) { gl.DisableVertexAttribArray(array.index); } return true; } GLint BufferBindingsGLES::ComputeTextureLocation( const ShaderMetadata* metadata) { auto location = binding_map_.find(metadata->name); if (location != binding_map_.end()) { return location->second[0]; } auto& locations = binding_map_[metadata->name] = {}; auto computed_location = uniform_locations_.find(CreateUniformMemberKey(metadata->name)); if (computed_location == uniform_locations_.end()) { locations.push_back(-1); } else { locations.push_back(computed_location->second); } return locations[0]; } const std::vector<GLint>& BufferBindingsGLES::ComputeUniformLocations( const ShaderMetadata* metadata) { auto location = binding_map_.find(metadata->name); if (location != binding_map_.end()) { return location->second; } // For each metadata member, look up the binding location and record // it in the binding map. auto& locations = binding_map_[metadata->name] = {}; for (const auto& member : metadata->members) { if (member.type == ShaderType::kVoid) { // Void types are used for padding. We are obviously not going to find // mappings for these. Keep going. locations.push_back(-1); continue; } size_t element_count = member.array_elements.value_or(1); const auto member_key = CreateUniformMemberKey(metadata->name, member.name, element_count > 1); const auto computed_location = uniform_locations_.find(member_key); if (computed_location == uniform_locations_.end()) { // Uniform was not active. 
locations.push_back(-1); continue; } locations.push_back(computed_location->second); } return locations; } bool BufferBindingsGLES::BindUniformBuffer(const ProcTableGLES& gl, Allocator& transients_allocator, const BufferResource& buffer) { const auto* metadata = buffer.GetMetadata(); auto device_buffer = buffer.resource.buffer; if (!device_buffer) { VALIDATION_LOG << "Device buffer not found."; return false; } const auto& device_buffer_gles = DeviceBufferGLES::Cast(*device_buffer); const uint8_t* buffer_ptr = device_buffer_gles.GetBufferData() + buffer.resource.range.offset; if (metadata->members.empty()) { VALIDATION_LOG << "Uniform buffer had no members. This is currently " "unsupported in the OpenGL ES backend. Use a uniform " "buffer block."; return false; } const auto& locations = ComputeUniformLocations(metadata); for (auto i = 0u; i < metadata->members.size(); i++) { const auto& member = metadata->members[i]; auto location = locations[i]; // Void type or inactive uniform. if (location == -1 || member.type == ShaderType::kVoid) { continue; } size_t element_count = member.array_elements.value_or(1); size_t element_stride = member.byte_length / element_count; auto* buffer_data = reinterpret_cast<const GLfloat*>(buffer_ptr + member.offset); std::vector<uint8_t> array_element_buffer; if (element_count > 1) { // When binding uniform arrays, the elements must be contiguous. Copy // the uniforms to a temp buffer to eliminate any padding needed by the // other backends. array_element_buffer.resize(member.size * element_count); for (size_t element_i = 0; element_i < element_count; element_i++) { std::memcpy(array_element_buffer.data() + element_i * member.size, reinterpret_cast<const char*>(buffer_data) + element_i * element_stride, member.size); } buffer_data = reinterpret_cast<const GLfloat*>(array_element_buffer.data()); } switch (member.type) { case ShaderType::kFloat: switch (member.size) { case sizeof(Matrix): gl.UniformMatrix4fv(location, // location element_count, // count GL_FALSE, // normalize buffer_data // data ); continue; case sizeof(Vector4): gl.Uniform4fv(location, // location element_count, // count buffer_data // data ); continue; case sizeof(Vector3): gl.Uniform3fv(location, // location element_count, // count buffer_data // data ); continue; case sizeof(Vector2): gl.Uniform2fv(location, // location element_count, // count buffer_data // data ); continue; case sizeof(Scalar): gl.Uniform1fv(location, // location element_count, // count buffer_data // data ); continue; } VALIDATION_LOG << "Size " << member.size << " could not be mapped ShaderType::kFloat for key: " << member.name; case ShaderType::kBoolean: case ShaderType::kSignedByte: case ShaderType::kUnsignedByte: case ShaderType::kSignedShort: case ShaderType::kUnsignedShort: case ShaderType::kSignedInt: case ShaderType::kUnsignedInt: case ShaderType::kSignedInt64: case ShaderType::kUnsignedInt64: case ShaderType::kAtomicCounter: case ShaderType::kUnknown: case ShaderType::kVoid: case ShaderType::kHalfFloat: case ShaderType::kDouble: case ShaderType::kStruct: case ShaderType::kImage: case ShaderType::kSampledImage: case ShaderType::kSampler: VALIDATION_LOG << "Could not bind uniform buffer data for key: " << member.name << " : " << static_cast<int>(member.type); return false; } } return true; } std::optional<size_t> BufferBindingsGLES::BindTextures( const ProcTableGLES& gl, const Bindings& bindings, ShaderStage stage, size_t unit_start_index) { size_t active_index = unit_start_index; for (const auto& data : 
bindings.sampled_images) { const auto& texture_gles = TextureGLES::Cast(*data.texture.resource); if (data.texture.GetMetadata() == nullptr) { VALIDATION_LOG << "No metadata found for texture binding."; return std::nullopt; } auto location = ComputeTextureLocation(data.texture.GetMetadata()); if (location == -1) { return std::nullopt; } //-------------------------------------------------------------------------- /// Set the active texture unit. /// if (active_index >= gl.GetCapabilities()->GetMaxTextureUnits(stage)) { VALIDATION_LOG << "Texture units specified exceed the capabilities for " "this shader stage."; return std::nullopt; } gl.ActiveTexture(GL_TEXTURE0 + active_index); //-------------------------------------------------------------------------- /// Bind the texture. /// if (!texture_gles.Bind()) { return std::nullopt; } //-------------------------------------------------------------------------- /// If there is a sampler for the texture at the same index, configure the /// bound texture using that sampler. /// const auto& sampler_gles = SamplerGLES::Cast(*data.sampler); if (!sampler_gles.ConfigureBoundTexture(texture_gles, gl)) { return std::nullopt; } //-------------------------------------------------------------------------- /// Set the texture uniform location. /// gl.Uniform1i(location, active_index); //-------------------------------------------------------------------------- /// Bump up the active index at binding. /// active_index++; } return active_index; } } // namespace impeller
engine/impeller/renderer/backend/gles/buffer_bindings_gles.cc/0
{ "file_path": "engine/impeller/renderer/backend/gles/buffer_bindings_gles.cc", "repo_id": "engine", "token_count": 6246 }
199
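The uniform lookup above builds keys by joining "Struct.member", appending "[0]" for array members, dropping underscores, and upper-casing (NormalizeUniformKey and CreateUniformMemberKey). Those helpers are internal to the file, so the standalone mirror below re-implements the same transformation purely to show what key a member resolves to; MirrorUniformKey is illustrative only, and the sample names are taken from the shader fixtures earlier in this dump.

#include <cctype>
#include <iostream>
#include <string>

std::string MirrorUniformKey(const std::string& struct_name,
                             const std::string& member,
                             bool is_array) {
  std::string key = struct_name + "." + member + (is_array ? "[0]" : "");
  std::string normalized;
  for (char ch : key) {
    if (ch != '_') {
      normalized.push_back(
          static_cast<char>(std::toupper(static_cast<unsigned char>(ch))));
    }
  }
  return normalized;
}

int main() {
  // "FrameInfo.atlas_size" -> "FRAMEINFO.ATLASSIZE"
  std::cout << MirrorUniformKey("FrameInfo", "atlas_size", false) << "\n";
  // Array members are looked up through their first element:
  // "FragInfo.colors[0]" -> "FRAGINFO.COLORS[0]"
  std::cout << MirrorUniformKey("FragInfo", "colors", true) << "\n";
  return 0;
}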
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_GLES_GPU_TRACER_GLES_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_GLES_GPU_TRACER_GLES_H_ #include <cstdint> #include <deque> #include <optional> #include <thread> #include "impeller/renderer/backend/gles/proc_table_gles.h" namespace impeller { /// @brief Trace GPU execution times using GL_EXT_disjoint_timer_query on GLES. /// /// Note: there are a substantial number of GPUs where usage of this API is /// known to cause crashes. As a result, this functionality is disabled by /// default and can only be enabled in debug/profile mode via a specific opt-in /// flag that is exposed in the Android manifest. /// /// To enable, add the following metadata to the application's Android manifest: /// <meta-data /// android:name="io.flutter.embedding.android.EnableOpenGLGPUTracing" /// android:value="true" /> class GPUTracerGLES { public: GPUTracerGLES(const ProcTableGLES& gl, bool enable_tracing); ~GPUTracerGLES() = default; /// @brief Record the thread id of the raster thread. void RecordRasterThread(); /// @brief Record the start of a frame workload, if one hasn't already been /// started. void MarkFrameStart(const ProcTableGLES& gl); /// @brief Record the end of a frame workload. void MarkFrameEnd(const ProcTableGLES& gl); private: void ProcessQueries(const ProcTableGLES& gl); std::deque<uint32_t> pending_traces_; std::optional<uint32_t> active_frame_ = std::nullopt; std::thread::id raster_thread_; bool enabled_ = false; }; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_GLES_GPU_TRACER_GLES_H_
engine/impeller/renderer/backend/gles/gpu_tracer_gles.h/0
{ "file_path": "engine/impeller/renderer/backend/gles/gpu_tracer_gles.h", "repo_id": "engine", "token_count": 600 }
200
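A minimal sketch of how the tracer above might be driven for one frame. In the engine the tracer would normally live for the lifetime of the GLES context rather than a single call, and where the ProcTableGLES comes from is assumed; only the GPUTracerGLES calls mirror the header.

#include "impeller/renderer/backend/gles/gpu_tracer_gles.h"

namespace impeller {

// Hypothetical driver for one frame of tracing.
void TraceOneFrame(const ProcTableGLES& gl, bool tracing_enabled) {
  GPUTracerGLES tracer(gl, tracing_enabled);
  tracer.RecordRasterThread();  // Record which thread rasterizes frames.
  tracer.MarkFrameStart(gl);    // Record the start of the frame workload.
  // ... encode and submit the frame's GL commands here ...
  tracer.MarkFrameEnd(gl);      // Record the end of the frame workload.
}

}  // namespace impeller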
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_GLES_SAMPLER_LIBRARY_GLES_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_GLES_SAMPLER_LIBRARY_GLES_H_ #include "impeller/core/sampler.h" #include "impeller/core/sampler_descriptor.h" #include "impeller/renderer/sampler_library.h" namespace impeller { class SamplerLibraryGLES final : public SamplerLibrary { public: explicit SamplerLibraryGLES(bool supports_decal_sampler_address_mode); // |SamplerLibrary| ~SamplerLibraryGLES() override; private: friend class ContextGLES; SamplerMap samplers_; SamplerLibraryGLES(); // |SamplerLibrary| const std::unique_ptr<const Sampler>& GetSampler( SamplerDescriptor descriptor) override; bool supports_decal_sampler_address_mode_ = false; SamplerLibraryGLES(const SamplerLibraryGLES&) = delete; SamplerLibraryGLES& operator=(const SamplerLibraryGLES&) = delete; }; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_GLES_SAMPLER_LIBRARY_GLES_H_
engine/impeller/renderer/backend/gles/sampler_library_gles.h/0
{ "file_path": "engine/impeller/renderer/backend/gles/sampler_library_gles.h", "repo_id": "engine", "token_count": 412 }
201
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/renderer/backend/gles/texture_gles.h" #include <optional> #include <utility> #include "flutter/fml/logging.h" #include "flutter/fml/mapping.h" #include "flutter/fml/trace_event.h" #include "impeller/base/allocation.h" #include "impeller/base/validation.h" #include "impeller/core/formats.h" #include "impeller/core/texture_descriptor.h" #include "impeller/renderer/backend/gles/formats_gles.h" namespace impeller { static bool IsDepthStencilFormat(PixelFormat format) { switch (format) { case PixelFormat::kS8UInt: case PixelFormat::kD24UnormS8Uint: case PixelFormat::kD32FloatS8UInt: return true; case PixelFormat::kUnknown: case PixelFormat::kA8UNormInt: case PixelFormat::kR8UNormInt: case PixelFormat::kR8G8UNormInt: case PixelFormat::kR8G8B8A8UNormInt: case PixelFormat::kR8G8B8A8UNormIntSRGB: case PixelFormat::kB8G8R8A8UNormInt: case PixelFormat::kB8G8R8A8UNormIntSRGB: case PixelFormat::kR32G32B32A32Float: case PixelFormat::kR16G16B16A16Float: case PixelFormat::kB10G10R10XR: case PixelFormat::kB10G10R10XRSRGB: case PixelFormat::kB10G10R10A10XR: return false; } FML_UNREACHABLE(); } static TextureGLES::Type GetTextureTypeFromDescriptor( const TextureDescriptor& desc) { const auto usage = static_cast<TextureUsageMask>(desc.usage); const auto render_target = TextureUsage::kRenderTarget; const auto is_msaa = desc.sample_count == SampleCount::kCount4; if (usage == render_target && IsDepthStencilFormat(desc.format)) { return is_msaa ? TextureGLES::Type::kRenderBufferMultisampled : TextureGLES::Type::kRenderBuffer; } return is_msaa ? TextureGLES::Type::kTextureMultisampled : TextureGLES::Type::kTexture; } HandleType ToHandleType(TextureGLES::Type type) { switch (type) { case TextureGLES::Type::kTexture: case TextureGLES::Type::kTextureMultisampled: return HandleType::kTexture; case TextureGLES::Type::kRenderBuffer: case TextureGLES::Type::kRenderBufferMultisampled: return HandleType::kRenderBuffer; } FML_UNREACHABLE(); } TextureGLES::TextureGLES(ReactorGLES::Ref reactor, TextureDescriptor desc) : TextureGLES(std::move(reactor), desc, false) {} TextureGLES::TextureGLES(ReactorGLES::Ref reactor, TextureDescriptor desc, enum IsWrapped wrapped) : TextureGLES(std::move(reactor), desc, true) {} TextureGLES::TextureGLES(std::shared_ptr<ReactorGLES> reactor, TextureDescriptor desc, bool is_wrapped) : Texture(desc), reactor_(std::move(reactor)), type_(GetTextureTypeFromDescriptor(GetTextureDescriptor())), handle_(reactor_->CreateHandle(ToHandleType(type_))), is_wrapped_(is_wrapped) { // Ensure the texture descriptor itself is valid. if (!GetTextureDescriptor().IsValid()) { VALIDATION_LOG << "Invalid texture descriptor."; return; } // Ensure the texture doesn't exceed device capabilities. 
const auto tex_size = GetTextureDescriptor().size; const auto max_size = reactor_->GetProcTable().GetCapabilities()->max_texture_size; if (tex_size.Max(max_size) != max_size) { VALIDATION_LOG << "Texture of size " << tex_size << " would exceed max supported size of " << max_size << "."; return; } is_valid_ = true; } // |Texture| TextureGLES::~TextureGLES() { reactor_->CollectHandle(handle_); } // |Texture| bool TextureGLES::IsValid() const { return is_valid_; } // |Texture| void TextureGLES::SetLabel(std::string_view label) { reactor_->SetDebugLabel(handle_, std::string{label.data(), label.size()}); } struct TexImage2DData { GLint internal_format = 0; GLenum external_format = GL_NONE; GLenum type = GL_NONE; std::shared_ptr<const fml::Mapping> data; explicit TexImage2DData(PixelFormat pixel_format) { switch (pixel_format) { case PixelFormat::kA8UNormInt: internal_format = GL_ALPHA; external_format = GL_ALPHA; type = GL_UNSIGNED_BYTE; break; case PixelFormat::kR8UNormInt: internal_format = GL_RED; external_format = GL_RED; type = GL_UNSIGNED_BYTE; break; case PixelFormat::kR8G8B8A8UNormInt: case PixelFormat::kB8G8R8A8UNormInt: case PixelFormat::kR8G8B8A8UNormIntSRGB: case PixelFormat::kB8G8R8A8UNormIntSRGB: internal_format = GL_RGBA; external_format = GL_RGBA; type = GL_UNSIGNED_BYTE; break; case PixelFormat::kR32G32B32A32Float: internal_format = GL_RGBA; external_format = GL_RGBA; type = GL_FLOAT; break; case PixelFormat::kR16G16B16A16Float: internal_format = GL_RGBA; external_format = GL_RGBA; type = GL_HALF_FLOAT; break; case PixelFormat::kS8UInt: // Pure stencil textures are only available in OpenGL 4.4+, which is // ~0% of mobile devices. Instead, we use a depth-stencil texture and // only use the stencil component. // // https://registry.khronos.org/OpenGL-Refpages/gl4/html/glTexImage2D.xhtml case PixelFormat::kD24UnormS8Uint: internal_format = GL_DEPTH_STENCIL; external_format = GL_DEPTH_STENCIL; type = GL_UNSIGNED_INT_24_8; break; case PixelFormat::kUnknown: case PixelFormat::kD32FloatS8UInt: case PixelFormat::kR8G8UNormInt: case PixelFormat::kB10G10R10XRSRGB: case PixelFormat::kB10G10R10XR: case PixelFormat::kB10G10R10A10XR: return; } is_valid_ = true; } TexImage2DData(PixelFormat pixel_format, std::shared_ptr<const fml::Mapping> mapping) : TexImage2DData(pixel_format) { data = std::move(mapping); } bool IsValid() const { return is_valid_; } private: bool is_valid_ = false; }; // |Texture| bool TextureGLES::OnSetContents(const uint8_t* contents, size_t length, size_t slice) { return OnSetContents(CreateMappingWithCopy(contents, length), slice); } // |Texture| bool TextureGLES::OnSetContents(std::shared_ptr<const fml::Mapping> mapping, size_t slice) { if (!mapping) { return false; } if (mapping->GetSize() == 0u) { return true; } if (mapping->GetMapping() == nullptr) { return false; } if (GetType() != Type::kTexture) { VALIDATION_LOG << "Incorrect texture usage flags for setting contents on " "this texture object."; return false; } if (is_wrapped_) { VALIDATION_LOG << "Cannot set the contents of a wrapped texture."; return false; } const auto& tex_descriptor = GetTextureDescriptor(); if (tex_descriptor.size.IsEmpty()) { return true; } if (!tex_descriptor.IsValid()) { return false; } if (mapping->GetSize() < tex_descriptor.GetByteSizeOfBaseMipLevel()) { return false; } GLenum texture_type; GLenum texture_target; switch (tex_descriptor.type) { case TextureType::kTexture2D: texture_type = GL_TEXTURE_2D; texture_target = GL_TEXTURE_2D; break; case TextureType::kTexture2DMultisample: 
VALIDATION_LOG << "Multisample texture uploading is not supported for " "the OpenGLES backend."; return false; case TextureType::kTextureCube: texture_type = GL_TEXTURE_CUBE_MAP; texture_target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + slice; break; case TextureType::kTextureExternalOES: texture_type = GL_TEXTURE_EXTERNAL_OES; texture_target = GL_TEXTURE_EXTERNAL_OES; break; } auto data = std::make_shared<TexImage2DData>(tex_descriptor.format, std::move(mapping)); if (!data || !data->IsValid()) { VALIDATION_LOG << "Invalid texture format."; return false; } ReactorGLES::Operation texture_upload = [handle = handle_, // data, // size = tex_descriptor.size, // texture_type, // texture_target // ](const auto& reactor) { auto gl_handle = reactor.GetGLHandle(handle); if (!gl_handle.has_value()) { VALIDATION_LOG << "Texture was collected before it could be uploaded to the GPU."; return; } const auto& gl = reactor.GetProcTable(); gl.BindTexture(texture_type, gl_handle.value()); const GLvoid* tex_data = nullptr; if (data->data) { tex_data = data->data->GetMapping(); } { TRACE_EVENT1("impeller", "TexImage2DUpload", "Bytes", std::to_string(data->data->GetSize()).c_str()); gl.TexImage2D(texture_target, // target 0u, // LOD level data->internal_format, // internal format size.width, // width size.height, // height 0u, // border data->external_format, // external format data->type, // type tex_data // data ); } }; contents_initialized_ = reactor_->AddOperation(texture_upload); return contents_initialized_; } // |Texture| ISize TextureGLES::GetSize() const { return GetTextureDescriptor().size; } static std::optional<GLenum> ToRenderBufferFormat(PixelFormat format) { switch (format) { case PixelFormat::kB8G8R8A8UNormInt: case PixelFormat::kR8G8B8A8UNormInt: return GL_RGBA4; case PixelFormat::kR32G32B32A32Float: return GL_RGBA32F; case PixelFormat::kR16G16B16A16Float: return GL_RGBA16F; case PixelFormat::kS8UInt: return GL_STENCIL_INDEX8; case PixelFormat::kD24UnormS8Uint: return GL_DEPTH24_STENCIL8; case PixelFormat::kD32FloatS8UInt: return GL_DEPTH32F_STENCIL8; case PixelFormat::kUnknown: case PixelFormat::kA8UNormInt: case PixelFormat::kR8UNormInt: case PixelFormat::kR8G8UNormInt: case PixelFormat::kR8G8B8A8UNormIntSRGB: case PixelFormat::kB8G8R8A8UNormIntSRGB: case PixelFormat::kB10G10R10XRSRGB: case PixelFormat::kB10G10R10XR: case PixelFormat::kB10G10R10A10XR: return std::nullopt; } FML_UNREACHABLE(); } void TextureGLES::InitializeContentsIfNecessary() const { if (!IsValid()) { return; } if (contents_initialized_) { return; } contents_initialized_ = true; if (is_wrapped_) { return; } auto size = GetSize(); if (size.IsEmpty()) { return; } const auto& gl = reactor_->GetProcTable(); auto handle = reactor_->GetGLHandle(handle_); if (!handle.has_value()) { VALIDATION_LOG << "Could not initialize the contents of texture."; return; } switch (type_) { case Type::kTexture: case Type::kTextureMultisampled: { TexImage2DData tex_data(GetTextureDescriptor().format); if (!tex_data.IsValid()) { VALIDATION_LOG << "Invalid format for texture image."; return; } gl.BindTexture(GL_TEXTURE_2D, handle.value()); { TRACE_EVENT0("impeller", "TexImage2DInitialization"); gl.TexImage2D(GL_TEXTURE_2D, // target 0u, // LOD level (base mip level size checked) tex_data.internal_format, // internal format size.width, // width size.height, // height 0u, // border tex_data.external_format, // format tex_data.type, // type nullptr // data ); } } break; case Type::kRenderBuffer: case Type::kRenderBufferMultisampled: { auto render_buffer_format = 
ToRenderBufferFormat(GetTextureDescriptor().format); if (!render_buffer_format.has_value()) { VALIDATION_LOG << "Invalid format for render-buffer image."; return; } gl.BindRenderbuffer(GL_RENDERBUFFER, handle.value()); { TRACE_EVENT0("impeller", "RenderBufferStorageInitialization"); if (type_ == Type::kRenderBufferMultisampled) { gl.RenderbufferStorageMultisampleEXT( GL_RENDERBUFFER, // target 4, // samples render_buffer_format.value(), // internal format size.width, // width size.height // height ); } else { gl.RenderbufferStorage( GL_RENDERBUFFER, // target render_buffer_format.value(), // internal format size.width, // width size.height // height ); } } } break; } } std::optional<GLuint> TextureGLES::GetGLHandle() const { if (!IsValid()) { return std::nullopt; } return reactor_->GetGLHandle(handle_); } bool TextureGLES::Bind() const { auto handle = GetGLHandle(); if (!handle.has_value()) { return false; } const auto& gl = reactor_->GetProcTable(); switch (type_) { case Type::kTexture: case Type::kTextureMultisampled: { const auto target = ToTextureTarget(GetTextureDescriptor().type); if (!target.has_value()) { VALIDATION_LOG << "Could not bind texture of this type."; return false; } gl.BindTexture(target.value(), handle.value()); } break; case Type::kRenderBuffer: case Type::kRenderBufferMultisampled: gl.BindRenderbuffer(GL_RENDERBUFFER, handle.value()); break; } InitializeContentsIfNecessary(); return true; } bool TextureGLES::GenerateMipmap() { if (!IsValid()) { return false; } auto type = GetTextureDescriptor().type; switch (type) { case TextureType::kTexture2D: break; case TextureType::kTexture2DMultisample: VALIDATION_LOG << "Generating mipmaps for multisample textures is not " "supported in the GLES backend."; return false; case TextureType::kTextureCube: break; case TextureType::kTextureExternalOES: break; } if (!Bind()) { return false; } auto handle = GetGLHandle(); if (!handle.has_value()) { return false; } const auto& gl = reactor_->GetProcTable(); gl.GenerateMipmap(ToTextureType(type)); mipmap_generated_ = true; return true; } TextureGLES::Type TextureGLES::GetType() const { return type_; } static GLenum ToAttachmentType(TextureGLES::AttachmentType point) { switch (point) { case TextureGLES::AttachmentType::kColor0: return GL_COLOR_ATTACHMENT0; case TextureGLES::AttachmentType::kDepth: return GL_DEPTH_ATTACHMENT; case TextureGLES::AttachmentType::kStencil: return GL_STENCIL_ATTACHMENT; } } bool TextureGLES::SetAsFramebufferAttachment( GLenum target, AttachmentType attachment_type) const { if (!IsValid()) { return false; } InitializeContentsIfNecessary(); auto handle = GetGLHandle(); if (!handle.has_value()) { return false; } const auto& gl = reactor_->GetProcTable(); switch (type_) { case Type::kTexture: gl.FramebufferTexture2D(target, // target ToAttachmentType(attachment_type), // attachment GL_TEXTURE_2D, // textarget handle.value(), // texture 0 // level ); break; case Type::kTextureMultisampled: gl.FramebufferTexture2DMultisampleEXT( target, // target ToAttachmentType(attachment_type), // attachment GL_TEXTURE_2D, // textarget handle.value(), // texture 0, // level 4 // samples ); break; case Type::kRenderBuffer: case Type::kRenderBufferMultisampled: gl.FramebufferRenderbuffer( target, // target ToAttachmentType(attachment_type), // attachment GL_RENDERBUFFER, // render-buffer target handle.value() // render-buffer ); break; } return true; } // |Texture| Scalar TextureGLES::GetYCoordScale() const { switch (GetCoordinateSystem()) { case 
TextureCoordinateSystem::kUploadFromHost: return 1.0; case TextureCoordinateSystem::kRenderToTexture: return -1.0; } FML_UNREACHABLE(); } } // namespace impeller
engine/impeller/renderer/backend/gles/texture_gles.cc/0
{ "file_path": "engine/impeller/renderer/backend/gles/texture_gles.cc", "repo_id": "engine", "token_count": 7991 }
202
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/renderer/backend/metal/compute_pipeline_mtl.h" namespace impeller { ComputePipelineMTL::ComputePipelineMTL(std::weak_ptr<PipelineLibrary> library, const ComputePipelineDescriptor& desc, id<MTLComputePipelineState> state) : Pipeline(std::move(library), desc), pipeline_state_(state) { if (!pipeline_state_) { return; } is_valid_ = true; } ComputePipelineMTL::~ComputePipelineMTL() = default; bool ComputePipelineMTL::IsValid() const { return is_valid_; } id<MTLComputePipelineState> ComputePipelineMTL::GetMTLComputePipelineState() const { return pipeline_state_; } } // namespace impeller
engine/impeller/renderer/backend/metal/compute_pipeline_mtl.mm/0
{ "file_path": "engine/impeller/renderer/backend/metal/compute_pipeline_mtl.mm", "repo_id": "engine", "token_count": 368 }
203
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/renderer/backend/metal/pipeline_mtl.h" namespace impeller { PipelineMTL::PipelineMTL(std::weak_ptr<PipelineLibrary> library, const PipelineDescriptor& desc, id<MTLRenderPipelineState> state, id<MTLDepthStencilState> depth_stencil_state) : Pipeline(std::move(library), desc), pipeline_state_(state), depth_stencil_state_(depth_stencil_state) { if (!pipeline_state_) { return; } is_valid_ = true; } PipelineMTL::~PipelineMTL() = default; bool PipelineMTL::IsValid() const { return is_valid_; } id<MTLRenderPipelineState> PipelineMTL::GetMTLRenderPipelineState() const { return pipeline_state_; } id<MTLDepthStencilState> PipelineMTL::GetMTLDepthStencilState() const { return depth_stencil_state_; } } // namespace impeller
engine/impeller/renderer/backend/metal/pipeline_mtl.mm/0
{ "file_path": "engine/impeller/renderer/backend/metal/pipeline_mtl.mm", "repo_id": "engine", "token_count": 421 }
204
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_METAL_TEXTURE_WRAPPER_MTL_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_METAL_TEXTURE_WRAPPER_MTL_H_ #include "impeller/core/texture.h" namespace impeller { std::shared_ptr<Texture> WrapTextureMTL( TextureDescriptor desc, const void* mtl_texture, std::function<void()> deletion_proc = nullptr); } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_METAL_TEXTURE_WRAPPER_MTL_H_
engine/impeller/renderer/backend/metal/texture_wrapper_mtl.h/0
{ "file_path": "engine/impeller/renderer/backend/metal/texture_wrapper_mtl.h", "repo_id": "engine", "token_count": 234 }
205
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_BLIT_PASS_VK_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_BLIT_PASS_VK_H_ #include "flutter/fml/macros.h" #include "flutter/impeller/base/config.h" #include "impeller/renderer/backend/vulkan/blit_command_vk.h" #include "impeller/renderer/blit_pass.h" namespace impeller { class CommandEncoderVK; class CommandBufferVK; class BlitPassVK final : public BlitPass { public: // |BlitPass| ~BlitPassVK() override; private: friend class CommandBufferVK; std::weak_ptr<CommandBufferVK> command_buffer_; std::vector<std::unique_ptr<BlitEncodeVK>> commands_; std::string label_; explicit BlitPassVK(std::weak_ptr<CommandBufferVK> command_buffer); // |BlitPass| bool IsValid() const override; // |BlitPass| void OnSetLabel(std::string label) override; // |BlitPass| bool EncodeCommands( const std::shared_ptr<Allocator>& transients_allocator) const override; // |BlitPass| bool OnCopyTextureToTextureCommand(std::shared_ptr<Texture> source, std::shared_ptr<Texture> destination, IRect source_region, IPoint destination_origin, std::string label) override; // |BlitPass| bool OnCopyTextureToBufferCommand(std::shared_ptr<Texture> source, std::shared_ptr<DeviceBuffer> destination, IRect source_region, size_t destination_offset, std::string label) override; // |BlitPass| bool OnCopyBufferToTextureCommand(BufferView source, std::shared_ptr<Texture> destination, IPoint destination_origin, std::string label) override; // |BlitPass| bool OnGenerateMipmapCommand(std::shared_ptr<Texture> texture, std::string label) override; BlitPassVK(const BlitPassVK&) = delete; BlitPassVK& operator=(const BlitPassVK&) = delete; }; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_BLIT_PASS_VK_H_
engine/impeller/renderer/backend/vulkan/blit_pass_vk.h/0
{ "file_path": "engine/impeller/renderer/backend/vulkan/blit_pass_vk.h", "repo_id": "engine", "token_count": 1117 }
206
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_COMPUTE_PIPELINE_VK_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_COMPUTE_PIPELINE_VK_H_ #include <memory> #include "flutter/fml/macros.h" #include "impeller/base/backend_cast.h" #include "impeller/renderer/backend/vulkan/device_holder_vk.h" #include "impeller/renderer/backend/vulkan/vk.h" #include "impeller/renderer/pipeline.h" namespace impeller { class ComputePipelineVK final : public Pipeline<ComputePipelineDescriptor>, public BackendCast<ComputePipelineVK, Pipeline<ComputePipelineDescriptor>> { public: ComputePipelineVK(std::weak_ptr<DeviceHolderVK> device_holder, std::weak_ptr<PipelineLibrary> library, const ComputePipelineDescriptor& desc, vk::UniquePipeline pipeline, vk::UniquePipelineLayout layout, vk::UniqueDescriptorSetLayout descriptor_set_layout); // |Pipeline| ~ComputePipelineVK() override; const vk::Pipeline& GetPipeline() const; const vk::PipelineLayout& GetPipelineLayout() const; const vk::DescriptorSetLayout& GetDescriptorSetLayout() const; private: friend class PipelineLibraryVK; std::weak_ptr<DeviceHolderVK> device_holder_; vk::UniquePipeline pipeline_; vk::UniquePipelineLayout layout_; vk::UniqueDescriptorSetLayout descriptor_set_layout_; bool is_valid_ = false; // |Pipeline| bool IsValid() const override; ComputePipelineVK(const ComputePipelineVK&) = delete; ComputePipelineVK& operator=(const ComputePipelineVK&) = delete; }; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_COMPUTE_PIPELINE_VK_H_
engine/impeller/renderer/backend/vulkan/compute_pipeline_vk.h/0
{ "file_path": "engine/impeller/renderer/backend/vulkan/compute_pipeline_vk.h", "repo_id": "engine", "token_count": 770 }
207
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_FENCE_WAITER_VK_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_FENCE_WAITER_VK_H_ #include <condition_variable> #include <memory> #include <thread> #include <vector> #include "flutter/fml/closure.h" #include "flutter/fml/macros.h" #include "impeller/base/thread.h" #include "impeller/renderer/backend/vulkan/device_holder_vk.h" #include "impeller/renderer/backend/vulkan/shared_object_vk.h" #include "impeller/renderer/backend/vulkan/vk.h" namespace impeller { class ContextVK; class WaitSetEntry; using WaitSet = std::vector<std::shared_ptr<WaitSetEntry>>; class FenceWaiterVK { public: ~FenceWaiterVK(); bool IsValid() const; void Terminate(); bool AddFence(vk::UniqueFence fence, const fml::closure& callback); private: friend class ContextVK; std::weak_ptr<DeviceHolderVK> device_holder_; std::unique_ptr<std::thread> waiter_thread_; std::mutex wait_set_mutex_; std::condition_variable wait_set_cv_; WaitSet wait_set_; bool terminate_ = false; explicit FenceWaiterVK(std::weak_ptr<DeviceHolderVK> device_holder); void Main(); bool Wait(); void WaitUntilEmpty(); FenceWaiterVK(const FenceWaiterVK&) = delete; FenceWaiterVK& operator=(const FenceWaiterVK&) = delete; }; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_FENCE_WAITER_VK_H_
engine/impeller/renderer/backend/vulkan/fence_waiter_vk.h/0
{ "file_path": "engine/impeller/renderer/backend/vulkan/fence_waiter_vk.h", "repo_id": "engine", "token_count": 577 }
208
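The waiter above pairs a fence with a callback that runs once the GPU signals it. A small sketch, assuming the FenceWaiterVK instance (normally owned by the Vulkan context) and the fence from a queue submission are obtained elsewhere; ScheduleCompletionLog is a hypothetical helper.

#include "flutter/fml/logging.h"
#include "impeller/renderer/backend/vulkan/fence_waiter_vk.h"

namespace impeller {

bool ScheduleCompletionLog(FenceWaiterVK& waiter, vk::UniqueFence fence) {
  // The closure runs on the waiter thread once the fence is signaled.
  return waiter.AddFence(std::move(fence),
                         []() { FML_LOG(INFO) << "Submission finished."; });
}

}  // namespace impeller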
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_RENDER_PASS_BUILDER_VK_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_RENDER_PASS_BUILDER_VK_H_ #include <map> #include <optional> #include "impeller/core/formats.h" #include "impeller/renderer/backend/vulkan/context_vk.h" #include "impeller/renderer/backend/vulkan/vk.h" namespace impeller { class RenderPassBuilderVK { public: RenderPassBuilderVK(); ~RenderPassBuilderVK(); RenderPassBuilderVK(const RenderPassBuilderVK&) = delete; RenderPassBuilderVK& operator=(const RenderPassBuilderVK&) = delete; RenderPassBuilderVK& SetColorAttachment(size_t index, PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action); RenderPassBuilderVK& SetDepthStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action); RenderPassBuilderVK& SetStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action); vk::UniqueRenderPass Build(const vk::Device& device) const; private: std::map<size_t, vk::AttachmentDescription> colors_; std::map<size_t, vk::AttachmentDescription> resolves_; std::optional<vk::AttachmentDescription> depth_stencil_; }; //------------------------------------------------------------------------------ /// @brief Inserts the appropriate barriers to ensure that subsequent /// commands can read from the specified image (itself a framebuffer /// attachment) as an input attachment. /// /// Unlike most barriers, this barrier may only be inserted within a /// Vulkan render-pass. /// /// The type of barrier inserted depends on the subpass setup and /// self-dependencies. Only use this utility method for inserting /// barriers in render passes created by `RenderPassBuilderVK`. /// /// @param[in] buffer The buffer /// @param[in] image The image /// void InsertBarrierForInputAttachmentRead(const vk::CommandBuffer& buffer, const vk::Image& image); } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_RENDER_PASS_BUILDER_VK_H_
engine/impeller/renderer/backend/vulkan/render_pass_builder_vk.h/0
{ "file_path": "engine/impeller/renderer/backend/vulkan/render_pass_builder_vk.h", "repo_id": "engine", "token_count": 1256 }
209
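A short sketch of how the builder composes a Vulkan render pass: one multisampled color attachment that resolves on store, plus a combined depth/stencil attachment. The chosen pixel formats and the `device` handle are assumptions made for illustration; only the builder calls themselves come from the header above.

// Hedged sketch (formats and device are assumed): build a render pass with
// RenderPassBuilderVK and verify the resulting handle.
RenderPassBuilderVK builder;
builder.SetColorAttachment(0u,                              // attachment index
                           PixelFormat::kR8G8B8A8UNormInt,  // color format
                           SampleCount::kCount4,            // MSAA
                           LoadAction::kClear,
                           StoreAction::kMultisampleResolve);
builder.SetDepthStencilAttachment(PixelFormat::kD32FloatS8UInt,
                                  SampleCount::kCount4,
                                  LoadAction::kClear,
                                  StoreAction::kDontCare);

vk::UniqueRenderPass render_pass = builder.Build(device);
if (!render_pass) {
  VALIDATION_LOG << "Could not build render pass.";
}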
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_SHARED_OBJECT_VK_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_SHARED_OBJECT_VK_H_ #include <memory> #include "flutter/fml/macros.h" #include "impeller/renderer/backend/vulkan/vk.h" namespace impeller { class SharedObjectVK { public: virtual ~SharedObjectVK() = default; }; template <class T> class SharedObjectVKT : public SharedObjectVK { public: using Resource = T; using UniqueResource = vk::UniqueHandle<Resource, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; explicit SharedObjectVKT(UniqueResource res) : resource_(std::move(res)) {} // NOLINTNEXTLINE(google-explicit-constructor) operator Resource() const { return Get(); } const Resource& Get() const { return *resource_; } private: UniqueResource resource_; SharedObjectVKT(const SharedObjectVKT&) = delete; SharedObjectVKT& operator=(const SharedObjectVKT&) = delete; }; template <class T> auto MakeSharedVK( vk::UniqueHandle<T, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> handle) { if (!handle) { return std::shared_ptr<SharedObjectVKT<T>>{nullptr}; } return std::make_shared<SharedObjectVKT<T>>(std::move(handle)); } template <class T> using SharedHandleVK = std::shared_ptr<SharedObjectVKT<T>>; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_SHARED_OBJECT_VK_H_
engine/impeller/renderer/backend/vulkan/shared_object_vk.h/0
{ "file_path": "engine/impeller/renderer/backend/vulkan/shared_object_vk.h", "repo_id": "engine", "token_count": 554 }
210
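Because several in-flight objects may need to keep a single Vulkan handle alive, MakeSharedVK converts a unique handle into a ref-counted SharedHandleVK. A minimal sketch, assuming a valid vk::Device is already in hand:

// Hedged sketch (the device is assumed): wrap a unique semaphore so that it
// can be retained by multiple owners.
auto semaphore = device.createSemaphoreUnique({}).value;

// A null unique handle would yield a null shared handle.
SharedHandleVK<vk::Semaphore> shared = MakeSharedVK(std::move(semaphore));

if (shared) {
  // SharedObjectVKT converts implicitly to the wrapped handle, so the shared
  // object can be passed wherever a vk::Semaphore is expected.
  vk::Semaphore raw = *shared;
  (void)raw;
}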
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/testing/testing.h" // IWYU pragma: keep #include "gtest/gtest.h" #include "impeller/renderer/backend/vulkan/test/mock_vulkan.h" #include "vulkan/vulkan_enums.hpp" namespace impeller { namespace testing { TEST(MockVulkanContextTest, IsThreadSafe) { // In a typical app, there is a single ContextVK per app, shared b/w threads. // // This test ensures that the (mock) ContextVK is thread-safe. auto const context = MockVulkanContextBuilder().Build(); // Spawn two threads, and have them create a CommandPoolVK each. std::thread thread1([&context]() { auto const pool = context->GetCommandPoolRecycler()->Get(); EXPECT_TRUE(pool); }); std::thread thread2([&context]() { auto const pool = context->GetCommandPoolRecycler()->Get(); EXPECT_TRUE(pool); }); thread1.join(); thread2.join(); context->Shutdown(); } TEST(MockVulkanContextTest, DefaultFenceAlwaysReportsSuccess) { auto const context = MockVulkanContextBuilder().Build(); auto const device = context->GetDevice(); auto fence = device.createFenceUnique({}).value; EXPECT_EQ(vk::Result::eSuccess, device.getFenceStatus(*fence)); } TEST(MockVulkanContextTest, MockedFenceReportsStatus) { auto const context = MockVulkanContextBuilder().Build(); auto const device = context->GetDevice(); auto fence = device.createFenceUnique({}).value; MockFence::SetStatus(fence, vk::Result::eNotReady); EXPECT_EQ(vk::Result::eNotReady, device.getFenceStatus(fence.get())); MockFence::SetStatus(fence, vk::Result::eSuccess); EXPECT_EQ(vk::Result::eSuccess, device.getFenceStatus(*fence)); } } // namespace testing } // namespace impeller
engine/impeller/renderer/backend/vulkan/test/mock_vulkan_unittests.cc/0
{ "file_path": "engine/impeller/renderer/backend/vulkan/test/mock_vulkan_unittests.cc", "repo_id": "engine", "token_count": 607 }
211
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_YUV_CONVERSION_VK_H_ #define FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_YUV_CONVERSION_VK_H_ #include <unordered_map> #include "flutter/fml/build_config.h" #include "impeller/base/comparable.h" #include "impeller/base/thread.h" #include "impeller/core/sampler.h" #include "impeller/renderer/backend/vulkan/sampler_vk.h" #include "impeller/renderer/backend/vulkan/shared_object_vk.h" #include "impeller/renderer/backend/vulkan/vk.h" namespace impeller { //------------------------------------------------------------------------------ /// A descriptor used to create a new YUV conversion in a conversion library. /// using YUVConversionDescriptorVK = vk::StructureChain<vk::SamplerYcbcrConversionCreateInfo #if FML_OS_ANDROID // For VK_ANDROID_external_memory_android_hardware_buffer , vk::ExternalFormatANDROID #endif // FML_OS_ANDROID >; class YUVConversionLibraryVK; //------------------------------------------------------------------------------ /// @brief It is sometimes necessary to deal with formats not native to /// Vulkan. In such cases, extra information is necessary to access /// images. A YUV conversion object is needed in such instances. /// /// There are usually only a handful of viable conversions in a /// given context. However, due to the way the Vulkan spec. treats /// "identically defined" conversions, only a single conversion /// object is valid for an equivalent `YUVConversionDescriptorVK`. /// Because of this restriction, it is not possible to just create a /// conversion from a descriptor (as the underlying handles will be /// equivalent but different). Instead, a conversion may only be /// obtained from a conversion library. Libraries handle hashing and /// caching conversions by descriptor. Caller can find a library on /// the top-level context. They may not create their own (the /// constructor is private). /// class YUVConversionVK final { public: ~YUVConversionVK(); YUVConversionVK(const YUVConversionVK&) = delete; YUVConversionVK& operator=(const YUVConversionVK&) = delete; //---------------------------------------------------------------------------- /// @return `true` if this conversion is valid for use with images and /// samplers. /// bool IsValid() const; //---------------------------------------------------------------------------- /// @brief Get the descriptor used to create this conversion. /// const YUVConversionDescriptorVK& GetDescriptor() const; //---------------------------------------------------------------------------- /// @return The Vulkan handle of the YUV conversion. 
/// vk::SamplerYcbcrConversion GetConversion() const; private: friend class YUVConversionLibraryVK; YUVConversionDescriptorVK chain_; vk::UniqueSamplerYcbcrConversion conversion_; YUVConversionVK(const vk::Device& device, const YUVConversionDescriptorVK& chain); }; struct YUVConversionDescriptorVKHash { std::size_t operator()(const YUVConversionDescriptorVK& object) const; }; struct YUVConversionDescriptorVKEqual { bool operator()(const YUVConversionDescriptorVK& lhs, const YUVConversionDescriptorVK& rhs) const; }; struct ImmutableSamplerKeyVK : public Comparable<ImmutableSamplerKeyVK> { SamplerDescriptor sampler; YUVConversionDescriptorVK yuv_conversion; explicit ImmutableSamplerKeyVK(const SamplerVK& sampler); // |Comparable<ImmutableSamplerKey>| std::size_t GetHash() const override; // |Comparable<ImmutableSamplerKey>| bool IsEqual(const ImmutableSamplerKeyVK& other) const override; }; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_VULKAN_YUV_CONVERSION_VK_H_
engine/impeller/renderer/backend/vulkan/yuv_conversion_vk.h/0
{ "file_path": "engine/impeller/renderer/backend/vulkan/yuv_conversion_vk.h", "repo_id": "engine", "token_count": 1420 }
212
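Since the comment above notes that conversions may only be obtained from the conversion library, a sketch of that flow follows. The GetYUVConversionLibrary() accessor on the context and the library's GetConversion() method are assumptions (neither is declared in this excerpt), as are the specific Y'CbCr settings.

// Hedged sketch: populate a descriptor and ask the (assumed) library for the
// single canonical conversion object matching it.
YUVConversionDescriptorVK descriptor;
auto& create_info = descriptor.get<vk::SamplerYcbcrConversionCreateInfo>();
create_info.format = vk::Format::eG8B8R82Plane420Unorm;
create_info.ycbcrModel = vk::SamplerYcbcrModelConversion::eYcbcr601;
create_info.ycbcrRange = vk::SamplerYcbcrRange::eItuNarrow;
create_info.chromaFilter = vk::Filter::eLinear;

auto conversion =
    context.GetYUVConversionLibrary()->GetConversion(descriptor);  // assumed API
if (!conversion || !conversion->IsValid()) {
  VALIDATION_LOG << "Could not obtain a YUV conversion.";
}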
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_COMPUTE_PASS_H_ #define FLUTTER_IMPELLER_RENDERER_COMPUTE_PASS_H_ #include <string> #include "fml/status.h" #include "impeller/core/resource_binder.h" #include "impeller/renderer/compute_pipeline_descriptor.h" #include "impeller/renderer/pipeline_descriptor.h" namespace impeller { //------------------------------------------------------------------------------ /// @brief Compute passes encode compute shader into the underlying command /// buffer. /// /// @see `CommandBuffer` /// class ComputePass : public ResourceBinder { public: virtual ~ComputePass(); virtual bool IsValid() const = 0; void SetLabel(const std::string& label); virtual void SetCommandLabel(std::string_view label) = 0; virtual void SetPipeline( const std::shared_ptr<Pipeline<ComputePipelineDescriptor>>& pipeline) = 0; virtual fml::Status Compute(const ISize& grid_size) = 0; /// @brief Ensures all previously encoded compute command's buffer writes are /// visible to any subsequent compute commands. /// /// On Vulkan, it does not matter if the compute command is in a /// different command buffer, only that it is executed later in queue /// order. virtual void AddBufferMemoryBarrier() = 0; /// @brief Ensures all previously encoded compute command's texture writes are /// visible to any subsequent compute commands. /// /// On Vulkan, it does not matter if the compute command is in a /// different command buffer, only that it is executed later in queue /// order. virtual void AddTextureMemoryBarrier() = 0; //---------------------------------------------------------------------------- /// @brief Encode the recorded commands to the underlying command buffer. /// /// @return If the commands were encoded to the underlying command /// buffer. /// virtual bool EncodeCommands() const = 0; const Context& GetContext() const { return *context_; } protected: const std::shared_ptr<const Context> context_; explicit ComputePass(std::shared_ptr<const Context> context); virtual void OnSetLabel(const std::string& label) = 0; private: ComputePass(const ComputePass&) = delete; ComputePass& operator=(const ComputePass&) = delete; }; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_COMPUTE_PASS_H_
engine/impeller/renderer/compute_pass.h/0
{ "file_path": "engine/impeller/renderer/compute_pass.h", "repo_id": "engine", "token_count": 807 }
213
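The barrier methods above are easiest to see in context: below is a sketch of two dependent dispatches on one pass, with a buffer barrier between them. The compute pipelines and their bound resources are assumed to exist elsewhere; only the pass calls come from the header.

// Hedged sketch (pipelines and bindings are assumed): chain two compute
// dispatches where the second reads buffers written by the first.
auto cmd_buffer = context->CreateCommandBuffer();
auto pass = cmd_buffer->CreateComputePass();

pass->SetCommandLabel("Stage 1");
pass->SetPipeline(stage_one_pipeline);
// ... bind stage-one buffers here ...
if (!pass->Compute(ISize(1024, 1)).ok()) {
  return false;
}

// Make stage one's buffer writes visible before stage two dispatches.
pass->AddBufferMemoryBarrier();

pass->SetCommandLabel("Stage 2");
pass->SetPipeline(stage_two_pipeline);
// ... bind stage-two buffers here ...
if (!pass->Compute(ISize(1024, 1)).ok()) {
  return false;
}

if (!pass->EncodeCommands()) {
  return false;
}
return context->GetCommandQueue()->Submit({cmd_buffer}).ok();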
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_RENDERER_PIPELINE_BUILDER_H_ #define FLUTTER_IMPELLER_RENDERER_PIPELINE_BUILDER_H_ #include "flutter/fml/logging.h" #include "flutter/fml/macros.h" #include "impeller/base/strings.h" #include "impeller/base/validation.h" #include "impeller/core/formats.h" #include "impeller/renderer/context.h" #include "impeller/renderer/pipeline_descriptor.h" #include "impeller/renderer/shader_library.h" #include "impeller/renderer/vertex_descriptor.h" namespace impeller { //------------------------------------------------------------------------------ /// @brief An optional (but highly recommended) utility for creating /// pipelines from reflected shader information. /// /// @tparam VertexShader_ The reflected vertex shader information. Found /// in a generated header file called /// <shader_name>.vert.h. /// @tparam FragmentShader_ The reflected fragment shader information. /// Found in a generated header file called /// <shader_name>.frag.h. /// template <class VertexShader_, class FragmentShader_> struct PipelineBuilder { public: using VertexShader = VertexShader_; using FragmentShader = FragmentShader_; static constexpr size_t kVertexBufferIndex = VertexDescriptor::kReservedVertexBufferIndex; //---------------------------------------------------------------------------- /// @brief Create a default pipeline descriptor using the combination /// reflected shader information. The descriptor can be configured /// further before a pipeline state object is created using it. /// /// @param[in] context The context /// /// @return If the combination of reflected shader information is /// compatible and the requisite functions can be found in the /// context, a pipeline descriptor. /// static std::optional<PipelineDescriptor> MakeDefaultPipelineDescriptor( const Context& context, const std::vector<Scalar>& constants = {}) { PipelineDescriptor desc; desc.SetSpecializationConstants(constants); if (InitializePipelineDescriptorDefaults(context, desc)) { return {std::move(desc)}; } return std::nullopt; } [[nodiscard]] static bool InitializePipelineDescriptorDefaults( const Context& context, PipelineDescriptor& desc) { // Setup debug instrumentation. desc.SetLabel(SPrintF("%s Pipeline", FragmentShader::kLabel.data())); // Resolve pipeline entrypoints. { auto vertex_function = context.GetShaderLibrary()->GetFunction( VertexShader::kEntrypointName, ShaderStage::kVertex); auto fragment_function = context.GetShaderLibrary()->GetFunction( FragmentShader::kEntrypointName, ShaderStage::kFragment); if (!vertex_function || !fragment_function) { VALIDATION_LOG << "Could not resolve pipeline entrypoint(s) '" << VertexShader::kEntrypointName << "' and '" << FragmentShader::kEntrypointName << "' for pipeline named '" << VertexShader::kLabel << "'."; return false; } desc.AddStageEntrypoint(std::move(vertex_function)); desc.AddStageEntrypoint(std::move(fragment_function)); } // Setup the vertex descriptor from reflected information. 
{ auto vertex_descriptor = std::make_shared<VertexDescriptor>(); vertex_descriptor->SetStageInputs(VertexShader::kAllShaderStageInputs, VertexShader::kInterleavedBufferLayout); vertex_descriptor->RegisterDescriptorSetLayouts( VertexShader::kDescriptorSetLayouts); vertex_descriptor->RegisterDescriptorSetLayouts( FragmentShader::kDescriptorSetLayouts); desc.SetVertexDescriptor(std::move(vertex_descriptor)); } // Setup fragment shader output descriptions. { // Configure the sole color attachments pixel format. This is by // convention. ColorAttachmentDescriptor color0; color0.format = context.GetCapabilities()->GetDefaultColorFormat(); color0.blending_enabled = true; desc.SetColorAttachmentDescriptor(0u, color0); } // Setup default depth buffer descriptions. { DepthAttachmentDescriptor depth0; depth0.depth_compare = CompareFunction::kAlways; desc.SetDepthStencilAttachmentDescriptor(depth0); desc.SetDepthPixelFormat( context.GetCapabilities()->GetDefaultDepthStencilFormat()); } // Setup default stencil buffer descriptions. { StencilAttachmentDescriptor stencil0; stencil0.stencil_compare = CompareFunction::kEqual; desc.SetStencilAttachmentDescriptors(stencil0); desc.SetStencilPixelFormat( context.GetCapabilities()->GetDefaultDepthStencilFormat()); } return true; } }; } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_PIPELINE_BUILDER_H_
engine/impeller/renderer/pipeline_builder.h/0
{ "file_path": "engine/impeller/renderer/pipeline_builder.h", "repo_id": "engine", "token_count": 1997 }
214
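The tests that follow exercise this builder heavily; a condensed version of that pattern is shown here. It assumes reflected BoxFade shader headers and a valid context, mirroring the CreateDefaultPipeline helper defined near the end of the test file below.

// Condensed sketch of the PipelineBuilder flow used throughout the tests
// below (shader headers and context are assumed to be available).
using VS = BoxFadeVertexShader;
using FS = BoxFadeFragmentShader;

auto desc = PipelineBuilder<VS, FS>::MakeDefaultPipelineDescriptor(*context);
if (!desc.has_value()) {
  return nullptr;
}
// The defaults can be adjusted before the pipeline state object is created.
desc->SetSampleCount(SampleCount::kCount4);
desc->SetStencilAttachmentDescriptors(std::nullopt);

auto pipeline = context->GetPipelineLibrary()->GetPipeline(desc).Get();
if (!pipeline || !pipeline->IsValid()) {
  return nullptr;
}
return pipeline;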
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/fml/logging.h" #include "impeller/core/device_buffer_descriptor.h" #include "impeller/core/formats.h" #include "impeller/core/host_buffer.h" #include "impeller/core/sampler_descriptor.h" #include "impeller/fixtures/array.frag.h" #include "impeller/fixtures/array.vert.h" #include "impeller/fixtures/box_fade.frag.h" #include "impeller/fixtures/box_fade.vert.h" #include "impeller/fixtures/colors.frag.h" #include "impeller/fixtures/colors.vert.h" #include "impeller/fixtures/impeller.frag.h" #include "impeller/fixtures/impeller.vert.h" #include "impeller/fixtures/inactive_uniforms.frag.h" #include "impeller/fixtures/inactive_uniforms.vert.h" #include "impeller/fixtures/instanced_draw.frag.h" #include "impeller/fixtures/instanced_draw.vert.h" #include "impeller/fixtures/mipmaps.frag.h" #include "impeller/fixtures/mipmaps.vert.h" #include "impeller/fixtures/sepia.frag.h" #include "impeller/fixtures/sepia.vert.h" #include "impeller/fixtures/swizzle.frag.h" #include "impeller/fixtures/test_texture.frag.h" #include "impeller/fixtures/test_texture.vert.h" #include "impeller/fixtures/texture.frag.h" #include "impeller/fixtures/texture.vert.h" #include "impeller/geometry/path_builder.h" #include "impeller/playground/playground_test.h" #include "impeller/renderer/command.h" #include "impeller/renderer/command_buffer.h" #include "impeller/renderer/pipeline_builder.h" #include "impeller/renderer/pipeline_library.h" #include "impeller/renderer/render_pass.h" #include "impeller/renderer/render_target.h" #include "impeller/renderer/renderer.h" #include "impeller/renderer/vertex_buffer_builder.h" #include "impeller/tessellator/tessellator.h" #include "third_party/imgui/imgui.h" // TODO(zanderso): https://github.com/flutter/flutter/issues/127701 // NOLINTBEGIN(bugprone-unchecked-optional-access) namespace impeller { namespace testing { using RendererTest = PlaygroundTest; INSTANTIATE_PLAYGROUND_SUITE(RendererTest); TEST_P(RendererTest, CanCreateBoxPrimitive) { using VS = BoxFadeVertexShader; using FS = BoxFadeFragmentShader; auto context = GetContext(); ASSERT_TRUE(context); using BoxPipelineBuilder = PipelineBuilder<VS, FS>; auto desc = BoxPipelineBuilder::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(desc.has_value()); desc->SetSampleCount(SampleCount::kCount4); desc->SetStencilAttachmentDescriptors(std::nullopt); // Vertex buffer. VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.SetLabel("Box"); vertex_builder.AddVertices({ {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 100, 0.0}, {1.0, 0.0}}, // 2 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 800, 0.0}, {0.0, 1.0}}, // 4 }); auto bridge = CreateTextureForFixture("bay_bridge.jpg"); auto boston = CreateTextureForFixture("boston.jpg"); ASSERT_TRUE(bridge && boston); const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); SinglePassCallback callback = [&](RenderPass& pass) { ImGui::Begin("Controls", nullptr, ImGuiWindowFlags_AlwaysAutoResize); static bool wireframe; ImGui::Checkbox("Wireframe", &wireframe); ImGui::End(); desc->SetPolygonMode(wireframe ? 
PolygonMode::kLine : PolygonMode::kFill); auto pipeline = context->GetPipelineLibrary()->GetPipeline(desc).Get(); assert(pipeline && pipeline->IsValid()); pass.SetCommandLabel("Box"); pass.SetPipeline(pipeline); pass.SetVertexBuffer( vertex_builder.CreateVertexBuffer(*context->GetResourceAllocator())); VS::UniformBuffer uniforms; EXPECT_EQ(pass.GetOrthographicTransform(), Matrix::MakeOrthographic(pass.GetRenderTargetSize())); uniforms.mvp = pass.GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()); VS::BindUniformBuffer(pass, host_buffer->EmplaceUniform(uniforms)); FS::FrameInfo frame_info; frame_info.current_time = GetSecondsElapsed(); frame_info.cursor_position = GetCursorPosition(); frame_info.window_size.x = GetWindowSize().width; frame_info.window_size.y = GetWindowSize().height; FS::BindFrameInfo(pass, host_buffer->EmplaceUniform(frame_info)); FS::BindContents1(pass, boston, sampler); FS::BindContents2(pass, bridge, sampler); host_buffer->Reset(); return pass.Draw().ok(); }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, CanRenderPerspectiveCube) { using VS = ColorsVertexShader; using FS = ColorsFragmentShader; auto context = GetContext(); ASSERT_TRUE(context); auto desc = PipelineBuilder<VS, FS>::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(desc.has_value()); desc->SetCullMode(CullMode::kBackFace); desc->SetWindingOrder(WindingOrder::kCounterClockwise); desc->SetSampleCount(SampleCount::kCount4); desc->SetStencilAttachmentDescriptors(std::nullopt); auto pipeline = context->GetPipelineLibrary()->GetPipeline(std::move(desc)).Get(); ASSERT_TRUE(pipeline); struct Cube { VS::PerVertexData vertices[8] = { // -Z {{-1, -1, -1}, Color::Red()}, {{1, -1, -1}, Color::Yellow()}, {{1, 1, -1}, Color::Green()}, {{-1, 1, -1}, Color::Blue()}, // +Z {{-1, -1, 1}, Color::Green()}, {{1, -1, 1}, Color::Blue()}, {{1, 1, 1}, Color::Red()}, {{-1, 1, 1}, Color::Yellow()}, }; uint16_t indices[36] = { 1, 5, 2, 2, 5, 6, // +X 4, 0, 7, 7, 0, 3, // -X 4, 5, 0, 0, 5, 1, // +Y 3, 2, 7, 7, 2, 6, // -Y 5, 4, 6, 6, 4, 7, // +Z 0, 1, 3, 3, 1, 2, // -Z }; } cube; VertexBuffer vertex_buffer; { auto device_buffer = context->GetResourceAllocator()->CreateBufferWithCopy( reinterpret_cast<uint8_t*>(&cube), sizeof(cube)); vertex_buffer.vertex_buffer = { .buffer = device_buffer, .range = Range(offsetof(Cube, vertices), sizeof(Cube::vertices))}; vertex_buffer.index_buffer = { .buffer = device_buffer, .range = Range(offsetof(Cube, indices), sizeof(Cube::indices))}; vertex_buffer.vertex_count = 36; vertex_buffer.index_type = IndexType::k16bit; } const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); Vector3 euler_angles; auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); SinglePassCallback callback = [&](RenderPass& pass) { static Degrees fov_y(60); static Scalar distance = 10; ImGui::Begin("Controls", nullptr, ImGuiWindowFlags_AlwaysAutoResize); ImGui::SliderFloat("Field of view", &fov_y.degrees, 0, 180); ImGui::SliderFloat("Camera distance", &distance, 0, 30); ImGui::End(); pass.SetCommandLabel("Perspective Cube"); pass.SetPipeline(pipeline); pass.SetVertexBuffer(vertex_buffer); VS::UniformBuffer uniforms; Scalar time = GetSecondsElapsed(); euler_angles = Vector3(0.19 * time, 0.7 * time, 0.43 * time); uniforms.mvp = Matrix::MakePerspective(fov_y, pass.GetRenderTargetSize(), 0, 10) * Matrix::MakeTranslation({0, 0, distance}) * Matrix::MakeRotationX(Radians(euler_angles.x)) * Matrix::MakeRotationY(Radians(euler_angles.y)) 
* Matrix::MakeRotationZ(Radians(euler_angles.z)); VS::BindUniformBuffer(pass, host_buffer->EmplaceUniform(uniforms)); host_buffer->Reset(); return pass.Draw().ok(); }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, CanRenderMultiplePrimitives) { using VS = BoxFadeVertexShader; using FS = BoxFadeFragmentShader; auto context = GetContext(); ASSERT_TRUE(context); using BoxPipelineBuilder = PipelineBuilder<VS, FS>; auto desc = BoxPipelineBuilder::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(desc.has_value()); desc->SetSampleCount(SampleCount::kCount4); desc->SetStencilAttachmentDescriptors(std::nullopt); auto box_pipeline = context->GetPipelineLibrary()->GetPipeline(std::move(desc)).Get(); ASSERT_TRUE(box_pipeline); // Vertex buffer. VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.SetLabel("Box"); vertex_builder.AddVertices({ {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 100, 0.0}, {1.0, 0.0}}, // 2 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 800, 0.0}, {0.0, 1.0}}, // 4 }); auto vertex_buffer = vertex_builder.CreateVertexBuffer(*context->GetResourceAllocator()); ASSERT_TRUE(vertex_buffer); auto bridge = CreateTextureForFixture("bay_bridge.jpg"); auto boston = CreateTextureForFixture("boston.jpg"); ASSERT_TRUE(bridge && boston); const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); SinglePassCallback callback = [&](RenderPass& pass) { for (size_t i = 0; i < 1; i++) { for (size_t j = 0; j < 1; j++) { pass.SetCommandLabel("Box"); pass.SetPipeline(box_pipeline); pass.SetVertexBuffer(vertex_buffer); FS::FrameInfo frame_info; frame_info.current_time = GetSecondsElapsed(); frame_info.cursor_position = GetCursorPosition(); frame_info.window_size.x = GetWindowSize().width; frame_info.window_size.y = GetWindowSize().height; FS::BindFrameInfo(pass, host_buffer->EmplaceUniform(frame_info)); FS::BindContents1(pass, boston, sampler); FS::BindContents2(pass, bridge, sampler); VS::UniformBuffer uniforms; EXPECT_EQ(pass.GetOrthographicTransform(), Matrix::MakeOrthographic(pass.GetRenderTargetSize())); uniforms.mvp = pass.GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()) * Matrix::MakeTranslation({i * 50.0f, j * 50.0f, 0.0f}); VS::BindUniformBuffer(pass, host_buffer->EmplaceUniform(uniforms)); if (!pass.Draw().ok()) { return false; } } } host_buffer->Reset(); return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, CanRenderToTexture) { using VS = BoxFadeVertexShader; using FS = BoxFadeFragmentShader; auto context = GetContext(); ASSERT_TRUE(context); using BoxPipelineBuilder = PipelineBuilder<VS, FS>; auto pipeline_desc = BoxPipelineBuilder::MakeDefaultPipelineDescriptor(*context); pipeline_desc->SetSampleCount(SampleCount::kCount1); pipeline_desc->ClearDepthAttachment(); pipeline_desc->SetStencilPixelFormat(PixelFormat::kS8UInt); ASSERT_TRUE(pipeline_desc.has_value()); auto box_pipeline = context->GetPipelineLibrary()->GetPipeline(pipeline_desc).Get(); ASSERT_TRUE(box_pipeline); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.SetLabel("Box"); vertex_builder.AddVertices({ {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 100, 0.0}, {1.0, 0.0}}, // 2 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 
800, 0.0}, {0.0, 1.0}}, // 4 }); auto vertex_buffer = vertex_builder.CreateVertexBuffer(*context->GetResourceAllocator()); ASSERT_TRUE(vertex_buffer); auto bridge = CreateTextureForFixture("bay_bridge.jpg"); auto boston = CreateTextureForFixture("boston.jpg"); ASSERT_TRUE(bridge && boston); const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); std::shared_ptr<RenderPass> r2t_pass; auto cmd_buffer = context->CreateCommandBuffer(); ASSERT_TRUE(cmd_buffer); { ColorAttachment color0; color0.load_action = LoadAction::kClear; color0.store_action = StoreAction::kStore; TextureDescriptor texture_descriptor; ASSERT_NE(pipeline_desc->GetColorAttachmentDescriptor(0u), nullptr); texture_descriptor.format = pipeline_desc->GetColorAttachmentDescriptor(0u)->format; texture_descriptor.storage_mode = StorageMode::kHostVisible; texture_descriptor.size = {400, 400}; texture_descriptor.mip_count = 1u; texture_descriptor.usage = TextureUsage::kRenderTarget; color0.texture = context->GetResourceAllocator()->CreateTexture(texture_descriptor); ASSERT_TRUE(color0.IsValid()); color0.texture->SetLabel("r2t_target"); StencilAttachment stencil0; stencil0.load_action = LoadAction::kClear; stencil0.store_action = StoreAction::kDontCare; TextureDescriptor stencil_texture_desc; stencil_texture_desc.storage_mode = StorageMode::kDeviceTransient; stencil_texture_desc.size = texture_descriptor.size; stencil_texture_desc.format = PixelFormat::kS8UInt; stencil_texture_desc.usage = TextureUsage::kRenderTarget; stencil0.texture = context->GetResourceAllocator()->CreateTexture(stencil_texture_desc); RenderTarget r2t_desc; r2t_desc.SetColorAttachment(color0, 0u); r2t_desc.SetStencilAttachment(stencil0); r2t_pass = cmd_buffer->CreateRenderPass(r2t_desc); ASSERT_TRUE(r2t_pass && r2t_pass->IsValid()); } r2t_pass->SetCommandLabel("Box"); r2t_pass->SetPipeline(box_pipeline); r2t_pass->SetVertexBuffer(vertex_buffer); FS::FrameInfo frame_info; frame_info.current_time = GetSecondsElapsed(); frame_info.cursor_position = GetCursorPosition(); frame_info.window_size.x = GetWindowSize().width; frame_info.window_size.y = GetWindowSize().height; FS::BindFrameInfo(*r2t_pass, host_buffer->EmplaceUniform(frame_info)); FS::BindContents1(*r2t_pass, boston, sampler); FS::BindContents2(*r2t_pass, bridge, sampler); VS::UniformBuffer uniforms; uniforms.mvp = Matrix::MakeOrthographic(ISize{1024, 768}) * Matrix::MakeTranslation({50.0f, 50.0f, 0.0f}); VS::BindUniformBuffer(*r2t_pass, host_buffer->EmplaceUniform(uniforms)); ASSERT_TRUE(r2t_pass->Draw().ok()); ASSERT_TRUE(r2t_pass->EncodeCommands()); } TEST_P(RendererTest, CanRenderInstanced) { if (GetParam() == PlaygroundBackend::kOpenGLES) { GTEST_SKIP_("Instancing is not supported on OpenGL."); } using VS = InstancedDrawVertexShader; using FS = InstancedDrawFragmentShader; VertexBufferBuilder<VS::PerVertexData> builder; ASSERT_EQ(Tessellator::Result::kSuccess, Tessellator{}.Tessellate( PathBuilder{} .AddRect(Rect::MakeXYWH(10, 10, 100, 100)) .TakePath(FillType::kOdd), 1.0f, [&builder](const float* vertices, size_t vertices_count, const uint16_t* indices, size_t indices_count) { for (auto i = 0u; i < vertices_count * 2; i += 2) { VS::PerVertexData data; data.vtx = {vertices[i], vertices[i + 1]}; builder.AppendVertex(data); } for (auto i = 0u; i < indices_count; i++) { builder.AppendIndex(indices[i]); } return true; })); ASSERT_NE(GetContext(), nullptr); auto pipeline = GetContext() ->GetPipelineLibrary() ->GetPipeline(PipelineBuilder<VS, 
FS>::MakeDefaultPipelineDescriptor( *GetContext()) ->SetSampleCount(SampleCount::kCount4) .SetStencilAttachmentDescriptors(std::nullopt)) .Get(); ASSERT_TRUE(pipeline && pipeline->IsValid()); static constexpr size_t kInstancesCount = 5u; VS::InstanceInfo<kInstancesCount> instances; for (size_t i = 0; i < kInstancesCount; i++) { instances.colors[i] = Color::Random(); } auto host_buffer = HostBuffer::Create(GetContext()->GetResourceAllocator()); ASSERT_TRUE(OpenPlaygroundHere([&](RenderPass& pass) -> bool { pass.SetPipeline(pipeline); pass.SetCommandLabel("InstancedDraw"); VS::FrameInfo frame_info; EXPECT_EQ(pass.GetOrthographicTransform(), Matrix::MakeOrthographic(pass.GetRenderTargetSize())); frame_info.mvp = pass.GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()); VS::BindFrameInfo(pass, host_buffer->EmplaceUniform(frame_info)); VS::BindInstanceInfo(pass, host_buffer->EmplaceStorageBuffer(instances)); pass.SetVertexBuffer(builder.CreateVertexBuffer(*host_buffer)); pass.SetInstanceCount(kInstancesCount); pass.Draw(); host_buffer->Reset(); return true; })); } TEST_P(RendererTest, CanBlitTextureToTexture) { if (GetBackend() == PlaygroundBackend::kOpenGLES) { GTEST_SKIP() << "Mipmap test shader not supported on GLES."; } auto context = GetContext(); ASSERT_TRUE(context); using VS = MipmapsVertexShader; using FS = MipmapsFragmentShader; auto desc = PipelineBuilder<VS, FS>::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(desc.has_value()); desc->SetSampleCount(SampleCount::kCount4); desc->SetStencilAttachmentDescriptors(std::nullopt); auto mipmaps_pipeline = context->GetPipelineLibrary()->GetPipeline(std::move(desc)).Get(); ASSERT_TRUE(mipmaps_pipeline); TextureDescriptor texture_desc; texture_desc.storage_mode = StorageMode::kHostVisible; texture_desc.format = PixelFormat::kR8G8B8A8UNormInt; texture_desc.size = {800, 600}; texture_desc.mip_count = 1u; texture_desc.usage = TextureUsage::kRenderTarget | TextureUsage::kShaderRead; auto texture = context->GetResourceAllocator()->CreateTexture(texture_desc); ASSERT_TRUE(texture); auto bridge = CreateTextureForFixture("bay_bridge.jpg"); auto boston = CreateTextureForFixture("boston.jpg"); ASSERT_TRUE(bridge && boston); const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); // Vertex buffer. VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.SetLabel("Box"); auto size = Point(boston->GetSize()); vertex_builder.AddVertices({ {{0, 0}, {0.0, 0.0}}, // 1 {{size.x, 0}, {1.0, 0.0}}, // 2 {{size.x, size.y}, {1.0, 1.0}}, // 3 {{0, 0}, {0.0, 0.0}}, // 1 {{size.x, size.y}, {1.0, 1.0}}, // 3 {{0, size.y}, {0.0, 1.0}}, // 4 }); auto vertex_buffer = vertex_builder.CreateVertexBuffer(*context->GetResourceAllocator()); ASSERT_TRUE(vertex_buffer); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); Renderer::RenderCallback callback = [&](RenderTarget& render_target) { auto buffer = context->CreateCommandBuffer(); if (!buffer) { return false; } buffer->SetLabel("Playground Command Buffer"); { auto pass = buffer->CreateBlitPass(); if (!pass) { return false; } pass->SetLabel("Playground Blit Pass"); if (render_target.GetColorAttachments().empty()) { return false; } // Blit `bridge` to the top left corner of the texture. 
pass->AddCopy(bridge, texture); if (!pass->EncodeCommands(context->GetResourceAllocator())) { return false; } } { auto pass = buffer->CreateRenderPass(render_target); if (!pass) { return false; } pass->SetLabel("Playground Render Pass"); { pass->SetCommandLabel("Image"); pass->SetPipeline(mipmaps_pipeline); pass->SetVertexBuffer(vertex_buffer); VS::FrameInfo frame_info; EXPECT_EQ(pass->GetOrthographicTransform(), Matrix::MakeOrthographic(pass->GetRenderTargetSize())); frame_info.mvp = pass->GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()); VS::BindFrameInfo(*pass, host_buffer->EmplaceUniform(frame_info)); FS::FragInfo frag_info; frag_info.lod = 0; FS::BindFragInfo(*pass, host_buffer->EmplaceUniform(frag_info)); auto& sampler = context->GetSamplerLibrary()->GetSampler({}); FS::BindTex(*pass, texture, sampler); pass->Draw(); } pass->EncodeCommands(); } if (!context->GetCommandQueue()->Submit({buffer}).ok()) { return false; } host_buffer->Reset(); return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, CanBlitTextureToBuffer) { if (GetBackend() == PlaygroundBackend::kOpenGLES) { GTEST_SKIP() << "Mipmap test shader not supported on GLES."; } auto context = GetContext(); ASSERT_TRUE(context); using VS = MipmapsVertexShader; using FS = MipmapsFragmentShader; auto desc = PipelineBuilder<VS, FS>::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(desc.has_value()); desc->SetSampleCount(SampleCount::kCount4); desc->SetStencilAttachmentDescriptors(std::nullopt); auto mipmaps_pipeline = context->GetPipelineLibrary()->GetPipeline(std::move(desc)).Get(); ASSERT_TRUE(mipmaps_pipeline); auto bridge = CreateTextureForFixture("bay_bridge.jpg"); auto boston = CreateTextureForFixture("boston.jpg"); ASSERT_TRUE(bridge && boston); const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); TextureDescriptor texture_desc; texture_desc.storage_mode = StorageMode::kHostVisible; texture_desc.format = PixelFormat::kR8G8B8A8UNormInt; texture_desc.size = bridge->GetTextureDescriptor().size; texture_desc.mip_count = 1u; texture_desc.usage = TextureUsage::kRenderTarget | TextureUsage::kShaderWrite | TextureUsage::kShaderRead; DeviceBufferDescriptor device_buffer_desc; device_buffer_desc.storage_mode = StorageMode::kHostVisible; device_buffer_desc.size = bridge->GetTextureDescriptor().GetByteSizeOfBaseMipLevel(); auto device_buffer = context->GetResourceAllocator()->CreateBuffer(device_buffer_desc); // Vertex buffer. VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.SetLabel("Box"); auto size = Point(boston->GetSize()); vertex_builder.AddVertices({ {{0, 0}, {0.0, 0.0}}, // 1 {{size.x, 0}, {1.0, 0.0}}, // 2 {{size.x, size.y}, {1.0, 1.0}}, // 3 {{0, 0}, {0.0, 0.0}}, // 1 {{size.x, size.y}, {1.0, 1.0}}, // 3 {{0, size.y}, {0.0, 1.0}}, // 4 }); auto vertex_buffer = vertex_builder.CreateVertexBuffer(*context->GetResourceAllocator()); ASSERT_TRUE(vertex_buffer); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); Renderer::RenderCallback callback = [&](RenderTarget& render_target) { { auto buffer = context->CreateCommandBuffer(); if (!buffer) { return false; } buffer->SetLabel("Playground Command Buffer"); auto pass = buffer->CreateBlitPass(); if (!pass) { return false; } pass->SetLabel("Playground Blit Pass"); if (render_target.GetColorAttachments().empty()) { return false; } // Blit `bridge` to the top left corner of the texture. 
pass->AddCopy(bridge, device_buffer); pass->EncodeCommands(context->GetResourceAllocator()); if (!context->GetCommandQueue()->Submit({buffer}).ok()) { return false; } } { auto buffer = context->CreateCommandBuffer(); if (!buffer) { return false; } buffer->SetLabel("Playground Command Buffer"); auto pass = buffer->CreateRenderPass(render_target); if (!pass) { return false; } pass->SetLabel("Playground Render Pass"); { pass->SetCommandLabel("Image"); pass->SetPipeline(mipmaps_pipeline); pass->SetVertexBuffer(vertex_buffer); VS::FrameInfo frame_info; EXPECT_EQ(pass->GetOrthographicTransform(), Matrix::MakeOrthographic(pass->GetRenderTargetSize())); frame_info.mvp = pass->GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()); VS::BindFrameInfo(*pass, host_buffer->EmplaceUniform(frame_info)); FS::FragInfo frag_info; frag_info.lod = 0; FS::BindFragInfo(*pass, host_buffer->EmplaceUniform(frag_info)); const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler({}); auto buffer_view = DeviceBuffer::AsBufferView(device_buffer); auto texture = context->GetResourceAllocator()->CreateTexture(texture_desc); if (!texture->SetContents(device_buffer->OnGetContents(), buffer_view.range.length)) { VALIDATION_LOG << "Could not upload texture to device memory"; return false; } FS::BindTex(*pass, texture, sampler); pass->Draw().ok(); } pass->EncodeCommands(); if (!context->GetCommandQueue()->Submit({buffer}).ok()) { return false; } } host_buffer->Reset(); return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, CanGenerateMipmaps) { if (GetBackend() == PlaygroundBackend::kOpenGLES) { GTEST_SKIP() << "Mipmap test shader not supported on GLES."; } auto context = GetContext(); ASSERT_TRUE(context); using VS = MipmapsVertexShader; using FS = MipmapsFragmentShader; auto desc = PipelineBuilder<VS, FS>::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(desc.has_value()); desc->SetSampleCount(SampleCount::kCount4); desc->SetStencilAttachmentDescriptors(std::nullopt); auto mipmaps_pipeline = context->GetPipelineLibrary()->GetPipeline(std::move(desc)).Get(); ASSERT_TRUE(mipmaps_pipeline); auto boston = CreateTextureForFixture("boston.jpg", true); ASSERT_TRUE(boston); // Vertex buffer. VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.SetLabel("Box"); auto size = Point(boston->GetSize()); vertex_builder.AddVertices({ {{0, 0}, {0.0, 0.0}}, // 1 {{size.x, 0}, {1.0, 0.0}}, // 2 {{size.x, size.y}, {1.0, 1.0}}, // 3 {{0, 0}, {0.0, 0.0}}, // 1 {{size.x, size.y}, {1.0, 1.0}}, // 3 {{0, size.y}, {0.0, 1.0}}, // 4 }); auto vertex_buffer = vertex_builder.CreateVertexBuffer(*context->GetResourceAllocator()); ASSERT_TRUE(vertex_buffer); bool first_frame = true; auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); Renderer::RenderCallback callback = [&](RenderTarget& render_target) { const char* mip_filter_names[] = {"Nearest", "Linear"}; const MipFilter mip_filters[] = {MipFilter::kNearest, MipFilter::kLinear}; const char* min_filter_names[] = {"Nearest", "Linear"}; const MinMagFilter min_filters[] = {MinMagFilter::kNearest, MinMagFilter::kLinear}; // UI state. 
static int selected_mip_filter = 1; static int selected_min_filter = 0; static float lod = 4.5; ImGui::Begin("Controls", nullptr, ImGuiWindowFlags_AlwaysAutoResize); ImGui::Combo("Mip filter", &selected_mip_filter, mip_filter_names, sizeof(mip_filter_names) / sizeof(char*)); ImGui::Combo("Min filter", &selected_min_filter, min_filter_names, sizeof(min_filter_names) / sizeof(char*)); ImGui::SliderFloat("LOD", &lod, 0, boston->GetMipCount() - 1); ImGui::End(); auto buffer = context->CreateCommandBuffer(); if (!buffer) { return false; } buffer->SetLabel("Playground Command Buffer"); if (first_frame) { auto pass = buffer->CreateBlitPass(); if (!pass) { return false; } pass->SetLabel("Playground Blit Pass"); pass->GenerateMipmap(boston, "Boston Mipmap"); pass->EncodeCommands(context->GetResourceAllocator()); } first_frame = false; { auto pass = buffer->CreateRenderPass(render_target); if (!pass) { return false; } pass->SetLabel("Playground Render Pass"); { pass->SetCommandLabel("Image LOD"); pass->SetPipeline(mipmaps_pipeline); pass->SetVertexBuffer(vertex_buffer); VS::FrameInfo frame_info; EXPECT_EQ(pass->GetOrthographicTransform(), Matrix::MakeOrthographic(pass->GetRenderTargetSize())); frame_info.mvp = pass->GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()); VS::BindFrameInfo(*pass, host_buffer->EmplaceUniform(frame_info)); FS::FragInfo frag_info; frag_info.lod = lod; FS::BindFragInfo(*pass, host_buffer->EmplaceUniform(frag_info)); SamplerDescriptor sampler_desc; sampler_desc.mip_filter = mip_filters[selected_mip_filter]; sampler_desc.min_filter = min_filters[selected_min_filter]; const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler(sampler_desc); FS::BindTex(*pass, boston, sampler); pass->Draw(); } pass->EncodeCommands(); } if (!context->GetCommandQueue()->Submit({buffer}).ok()) { return false; } host_buffer->Reset(); return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, TheImpeller) { using VS = ImpellerVertexShader; using FS = ImpellerFragmentShader; auto context = GetContext(); auto pipeline_descriptor = PipelineBuilder<VS, FS>::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(pipeline_descriptor.has_value()); pipeline_descriptor->SetSampleCount(SampleCount::kCount4); pipeline_descriptor->SetStencilAttachmentDescriptors(std::nullopt); auto pipeline = context->GetPipelineLibrary()->GetPipeline(pipeline_descriptor).Get(); ASSERT_TRUE(pipeline && pipeline->IsValid()); auto blue_noise = CreateTextureForFixture("blue_noise.png"); SamplerDescriptor noise_sampler_desc; noise_sampler_desc.width_address_mode = SamplerAddressMode::kRepeat; noise_sampler_desc.height_address_mode = SamplerAddressMode::kRepeat; const std::unique_ptr<const Sampler>& noise_sampler = context->GetSamplerLibrary()->GetSampler(noise_sampler_desc); auto cube_map = CreateTextureCubeForFixture( {"table_mountain_px.png", "table_mountain_nx.png", "table_mountain_py.png", "table_mountain_ny.png", "table_mountain_pz.png", "table_mountain_nz.png"}); const std::unique_ptr<const Sampler>& cube_map_sampler = context->GetSamplerLibrary()->GetSampler({}); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); SinglePassCallback callback = [&](RenderPass& pass) { auto size = pass.GetRenderTargetSize(); pass.SetPipeline(pipeline); pass.SetCommandLabel("Impeller SDF scene"); VertexBufferBuilder<VS::PerVertexData> builder; builder.AddVertices({{Point()}, {Point(0, size.height)}, {Point(size.width, 0)}, {Point(size.width, 0)}, {Point(0, size.height)}, 
{Point(size.width, size.height)}}); pass.SetVertexBuffer(builder.CreateVertexBuffer(*host_buffer)); VS::FrameInfo frame_info; EXPECT_EQ(pass.GetOrthographicTransform(), Matrix::MakeOrthographic(size)); frame_info.mvp = pass.GetOrthographicTransform(); VS::BindFrameInfo(pass, host_buffer->EmplaceUniform(frame_info)); FS::FragInfo fs_uniform; fs_uniform.texture_size = Point(size); fs_uniform.time = GetSecondsElapsed(); FS::BindFragInfo(pass, host_buffer->EmplaceUniform(fs_uniform)); FS::BindBlueNoise(pass, blue_noise, noise_sampler); FS::BindCubeMap(pass, cube_map, cube_map_sampler); pass.Draw().ok(); host_buffer->Reset(); return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, ArrayUniforms) { using VS = ArrayVertexShader; using FS = ArrayFragmentShader; auto context = GetContext(); auto pipeline_descriptor = PipelineBuilder<VS, FS>::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(pipeline_descriptor.has_value()); pipeline_descriptor->SetSampleCount(SampleCount::kCount4); pipeline_descriptor->SetStencilAttachmentDescriptors(std::nullopt); auto pipeline = context->GetPipelineLibrary()->GetPipeline(pipeline_descriptor).Get(); ASSERT_TRUE(pipeline && pipeline->IsValid()); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); SinglePassCallback callback = [&](RenderPass& pass) { auto size = pass.GetRenderTargetSize(); pass.SetPipeline(pipeline); pass.SetCommandLabel("Google Dots"); VertexBufferBuilder<VS::PerVertexData> builder; builder.AddVertices({{Point()}, {Point(0, size.height)}, {Point(size.width, 0)}, {Point(size.width, 0)}, {Point(0, size.height)}, {Point(size.width, size.height)}}); pass.SetVertexBuffer(builder.CreateVertexBuffer(*host_buffer)); VS::FrameInfo frame_info; EXPECT_EQ(pass.GetOrthographicTransform(), Matrix::MakeOrthographic(size)); frame_info.mvp = pass.GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()); VS::BindFrameInfo(pass, host_buffer->EmplaceUniform(frame_info)); auto time = GetSecondsElapsed(); auto y_pos = [&time](float x) { return 400 + 10 * std::cos(time * 5 + x / 6); }; FS::FragInfo fs_uniform = { .circle_positions = {Point(430, y_pos(0)), Point(480, y_pos(1)), Point(530, y_pos(2)), Point(580, y_pos(3))}, .colors = {Color::MakeRGBA8(66, 133, 244, 255), Color::MakeRGBA8(219, 68, 55, 255), Color::MakeRGBA8(244, 180, 0, 255), Color::MakeRGBA8(15, 157, 88, 255)}, }; FS::BindFragInfo(pass, host_buffer->EmplaceUniform(fs_uniform)); pass.Draw(); host_buffer->Reset(); return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, InactiveUniforms) { using VS = InactiveUniformsVertexShader; using FS = InactiveUniformsFragmentShader; auto context = GetContext(); auto pipeline_descriptor = PipelineBuilder<VS, FS>::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(pipeline_descriptor.has_value()); pipeline_descriptor->SetSampleCount(SampleCount::kCount4); pipeline_descriptor->SetStencilAttachmentDescriptors(std::nullopt); auto pipeline = context->GetPipelineLibrary()->GetPipeline(pipeline_descriptor).Get(); ASSERT_TRUE(pipeline && pipeline->IsValid()); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); SinglePassCallback callback = [&](RenderPass& pass) { auto size = pass.GetRenderTargetSize(); pass.SetPipeline(pipeline); pass.SetCommandLabel("Inactive Uniform"); VertexBufferBuilder<VS::PerVertexData> builder; builder.AddVertices({{Point()}, {Point(0, size.height)}, {Point(size.width, 0)}, {Point(size.width, 0)}, {Point(0, size.height)}, {Point(size.width, size.height)}}); 
pass.SetVertexBuffer(builder.CreateVertexBuffer(*host_buffer)); VS::FrameInfo frame_info; EXPECT_EQ(pass.GetOrthographicTransform(), Matrix::MakeOrthographic(size)); frame_info.mvp = pass.GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()); VS::BindFrameInfo(pass, host_buffer->EmplaceUniform(frame_info)); FS::FragInfo fs_uniform = {.unused_color = Color::Red(), .color = Color::Green()}; FS::BindFragInfo(pass, host_buffer->EmplaceUniform(fs_uniform)); pass.Draw().ok(); host_buffer->Reset(); return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, CanCreateCPUBackedTexture) { if (GetParam() == PlaygroundBackend::kOpenGLES) { GTEST_SKIP_("CPU backed textures are not supported on OpenGLES."); } auto context = GetContext(); auto allocator = context->GetResourceAllocator(); size_t dimension = 2; do { ISize size(dimension, dimension); TextureDescriptor texture_descriptor; texture_descriptor.storage_mode = StorageMode::kHostVisible; texture_descriptor.format = PixelFormat::kR8G8B8A8UNormInt; texture_descriptor.size = size; auto row_bytes = std::max(static_cast<uint16_t>(size.width * 4), allocator->MinimumBytesPerRow(texture_descriptor.format)); auto buffer_size = size.height * row_bytes; DeviceBufferDescriptor buffer_descriptor; buffer_descriptor.storage_mode = StorageMode::kHostVisible; buffer_descriptor.size = buffer_size; auto buffer = allocator->CreateBuffer(buffer_descriptor); ASSERT_TRUE(buffer); auto texture = buffer->AsTexture(*allocator, texture_descriptor, row_bytes); ASSERT_TRUE(texture); ASSERT_TRUE(texture->IsValid()); dimension *= 2; } while (dimension <= 8192); } TEST_P(RendererTest, DefaultIndexSize) { using VS = BoxFadeVertexShader; // Default to 16bit index buffer size, as this is a reasonable default and // supported on all backends without extensions. VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.AppendIndex(0u); ASSERT_EQ(vertex_builder.GetIndexType(), IndexType::k16bit); } TEST_P(RendererTest, DefaultIndexBehavior) { using VS = BoxFadeVertexShader; // Do not create any index buffer if no indices were provided. VertexBufferBuilder<VS::PerVertexData> vertex_builder; ASSERT_EQ(vertex_builder.GetIndexType(), IndexType::kNone); } TEST_P(RendererTest, VertexBufferBuilder) { // Does not create index buffer if one is provided. 
using VS = BoxFadeVertexShader; VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.SetLabel("Box"); vertex_builder.AddVertices({ {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 100, 0.0}, {1.0, 0.0}}, // 2 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 800, 0.0}, {0.0, 1.0}}, // 4 }); vertex_builder.AppendIndex(0); vertex_builder.AppendIndex(1); vertex_builder.AppendIndex(2); vertex_builder.AppendIndex(1); vertex_builder.AppendIndex(2); vertex_builder.AppendIndex(3); ASSERT_EQ(vertex_builder.GetIndexCount(), 6u); ASSERT_EQ(vertex_builder.GetVertexCount(), 4u); } class CompareFunctionUIData { public: CompareFunctionUIData() { labels_.push_back("Never"); functions_.push_back(CompareFunction::kNever); labels_.push_back("Always"); functions_.push_back(CompareFunction::kAlways); labels_.push_back("Less"); functions_.push_back(CompareFunction::kLess); labels_.push_back("Equal"); functions_.push_back(CompareFunction::kEqual); labels_.push_back("LessEqual"); functions_.push_back(CompareFunction::kLessEqual); labels_.push_back("Greater"); functions_.push_back(CompareFunction::kGreater); labels_.push_back("NotEqual"); functions_.push_back(CompareFunction::kNotEqual); labels_.push_back("GreaterEqual"); functions_.push_back(CompareFunction::kGreaterEqual); assert(labels_.size() == functions_.size()); } const char* const* labels() const { return &labels_[0]; } int size() const { return labels_.size(); } int IndexOf(CompareFunction func) const { for (size_t i = 0; i < functions_.size(); i++) { if (functions_[i] == func) { return i; } } FML_UNREACHABLE(); return -1; } CompareFunction FunctionOf(int index) const { return functions_[index]; } private: std::vector<const char*> labels_; std::vector<CompareFunction> functions_; }; static const CompareFunctionUIData& CompareFunctionUI() { static CompareFunctionUIData data; return data; } TEST_P(RendererTest, StencilMask) { using VS = BoxFadeVertexShader; using FS = BoxFadeFragmentShader; auto context = GetContext(); ASSERT_TRUE(context); using BoxFadePipelineBuilder = PipelineBuilder<VS, FS>; auto desc = BoxFadePipelineBuilder::MakeDefaultPipelineDescriptor(*context); ASSERT_TRUE(desc.has_value()); // Vertex buffer. 
VertexBufferBuilder<VS::PerVertexData> vertex_builder; vertex_builder.SetLabel("Box"); vertex_builder.AddVertices({ {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 100, 0.0}, {1.0, 0.0}}, // 2 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 800, 0.0}, {0.0, 1.0}}, // 4 }); auto vertex_buffer = vertex_builder.CreateVertexBuffer(*context->GetResourceAllocator()); ASSERT_TRUE(vertex_buffer); desc->SetSampleCount(SampleCount::kCount4); desc->SetStencilAttachmentDescriptors(std::nullopt); auto bridge = CreateTextureForFixture("bay_bridge.jpg"); auto boston = CreateTextureForFixture("boston.jpg"); ASSERT_TRUE(bridge && boston); const std::unique_ptr<const Sampler>& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); static bool mirror = false; static int stencil_reference_write = 0xFF; static int stencil_reference_read = 0x1; std::vector<uint8_t> stencil_contents; static int last_stencil_contents_reference_value = 0; static int current_front_compare = CompareFunctionUI().IndexOf(CompareFunction::kLessEqual); static int current_back_compare = CompareFunctionUI().IndexOf(CompareFunction::kLessEqual); auto host_buffer = HostBuffer::Create(context->GetResourceAllocator()); Renderer::RenderCallback callback = [&](RenderTarget& render_target) { auto buffer = context->CreateCommandBuffer(); if (!buffer) { return false; } buffer->SetLabel("Playground Command Buffer"); { // Configure the stencil attachment for the test. RenderTarget::AttachmentConfig stencil_config; stencil_config.load_action = LoadAction::kLoad; stencil_config.store_action = StoreAction::kDontCare; stencil_config.storage_mode = StorageMode::kHostVisible; render_target.SetupDepthStencilAttachments( *context, *context->GetResourceAllocator(), render_target.GetRenderTargetSize(), true, "stencil", stencil_config); // Fill the stencil buffer with an checkerboard pattern. 
const auto target_width = render_target.GetRenderTargetSize().width; const auto target_height = render_target.GetRenderTargetSize().height; const size_t target_size = target_width * target_height; if (stencil_contents.size() != target_size || last_stencil_contents_reference_value != stencil_reference_write) { stencil_contents.resize(target_size); last_stencil_contents_reference_value = stencil_reference_write; for (int y = 0; y < target_height; y++) { for (int x = 0; x < target_width; x++) { const auto index = y * target_width + x; const auto kCheckSize = 64; const auto value = (((y / kCheckSize) + (x / kCheckSize)) % 2 == 0) * stencil_reference_write; stencil_contents[index] = value; } } } if (!render_target.GetStencilAttachment()->texture->SetContents( stencil_contents.data(), stencil_contents.size(), 0, false)) { VALIDATION_LOG << "Could not upload stencil contents to device memory"; return false; } auto pass = buffer->CreateRenderPass(render_target); if (!pass) { return false; } pass->SetLabel("Stencil Buffer"); ImGui::Begin("Controls", nullptr, ImGuiWindowFlags_AlwaysAutoResize); ImGui::SliderInt("Stencil Write Value", &stencil_reference_write, 0, 0xFF); ImGui::SliderInt("Stencil Compare Value", &stencil_reference_read, 0, 0xFF); ImGui::Checkbox("Back face mode", &mirror); ImGui::ListBox("Front face compare function", &current_front_compare, CompareFunctionUI().labels(), CompareFunctionUI().size()); ImGui::ListBox("Back face compare function", &current_back_compare, CompareFunctionUI().labels(), CompareFunctionUI().size()); ImGui::End(); StencilAttachmentDescriptor front; front.stencil_compare = CompareFunctionUI().FunctionOf(current_front_compare); StencilAttachmentDescriptor back; back.stencil_compare = CompareFunctionUI().FunctionOf(current_back_compare); desc->SetStencilAttachmentDescriptors(front, back); auto pipeline = context->GetPipelineLibrary()->GetPipeline(desc).Get(); assert(pipeline && pipeline->IsValid()); pass->SetCommandLabel("Box"); pass->SetPipeline(pipeline); pass->SetStencilReference(stencil_reference_read); pass->SetVertexBuffer(vertex_buffer); VS::UniformBuffer uniforms; EXPECT_EQ(pass->GetOrthographicTransform(), Matrix::MakeOrthographic(pass->GetRenderTargetSize())); uniforms.mvp = pass->GetOrthographicTransform() * Matrix::MakeScale(GetContentScale()); if (mirror) { uniforms.mvp = Matrix::MakeScale(Vector2(-1, 1)) * uniforms.mvp; } VS::BindUniformBuffer(*pass, host_buffer->EmplaceUniform(uniforms)); FS::FrameInfo frame_info; frame_info.current_time = GetSecondsElapsed(); frame_info.cursor_position = GetCursorPosition(); frame_info.window_size.x = GetWindowSize().width; frame_info.window_size.y = GetWindowSize().height; FS::BindFrameInfo(*pass, host_buffer->EmplaceUniform(frame_info)); FS::BindContents1(*pass, boston, sampler); FS::BindContents2(*pass, bridge, sampler); if (!pass->Draw().ok()) { return false; } pass->EncodeCommands(); } if (!context->GetCommandQueue()->Submit({buffer}).ok()) { return false; } host_buffer->Reset(); return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, CanLookupRenderTargetProperties) { auto context = GetContext(); auto cmd_buffer = context->CreateCommandBuffer(); auto render_target_cache = std::make_shared<RenderTargetAllocator>( GetContext()->GetResourceAllocator()); auto render_target = render_target_cache->CreateOffscreen( *context, {100, 100}, /*mip_count=*/1); auto render_pass = cmd_buffer->CreateRenderPass(render_target); EXPECT_EQ(render_pass->GetSampleCount(), render_target.GetSampleCount()); 
EXPECT_EQ(render_pass->GetRenderTargetPixelFormat(), render_target.GetRenderTargetPixelFormat()); EXPECT_EQ(render_pass->HasStencilAttachment(), render_target.GetStencilAttachment().has_value()); EXPECT_EQ(render_pass->GetRenderTargetSize(), render_target.GetRenderTargetSize()); render_pass->EncodeCommands(); } TEST_P(RendererTest, RenderTargetCreateOffscreenMSAASetsDefaultDepthStencilFormat) { auto context = GetContext(); auto render_target_cache = std::make_shared<RenderTargetAllocator>( GetContext()->GetResourceAllocator()); RenderTarget render_target = render_target_cache->CreateOffscreenMSAA( *context, {100, 100}, /*mip_count=*/1); EXPECT_EQ(render_target.GetDepthAttachment() ->texture->GetTextureDescriptor() .format, GetContext()->GetCapabilities()->GetDefaultDepthStencilFormat()); } template <class VertexShader, class FragmentShader> std::shared_ptr<Pipeline<PipelineDescriptor>> CreateDefaultPipeline( const std::shared_ptr<Context>& context) { using TexturePipelineBuilder = PipelineBuilder<VertexShader, FragmentShader>; auto pipeline_desc = TexturePipelineBuilder::MakeDefaultPipelineDescriptor(*context); if (!pipeline_desc.has_value()) { return nullptr; } pipeline_desc->SetSampleCount(SampleCount::kCount4); pipeline_desc->SetStencilAttachmentDescriptors(std::nullopt); auto pipeline = context->GetPipelineLibrary()->GetPipeline(pipeline_desc).Get(); if (!pipeline || !pipeline->IsValid()) { return nullptr; } return pipeline; } TEST_P(RendererTest, CanSepiaToneWithSubpasses) { // The GLES framebuffer fetch implementation currently does not support this. // TODO(chinmaygarde): revisit after the GLES framebuffer fetch capabilities // are clarified. if (GetParam() == PlaygroundBackend::kOpenGLES) { GTEST_SKIP_("Not supported on GLES."); } // Define shader types using TextureVS = TextureVertexShader; using TextureFS = TextureFragmentShader; using SepiaVS = SepiaVertexShader; using SepiaFS = SepiaFragmentShader; auto context = GetContext(); ASSERT_TRUE(context); if (!context->GetCapabilities()->SupportsFramebufferFetch()) { GTEST_SKIP_( "This test uses framebuffer fetch and the backend doesn't support it."); return; } // Create pipelines. auto texture_pipeline = CreateDefaultPipeline<TextureVS, TextureFS>(context); auto sepia_pipeline = CreateDefaultPipeline<SepiaVS, SepiaFS>(context); ASSERT_TRUE(texture_pipeline); ASSERT_TRUE(sepia_pipeline); // Vertex buffer builders. VertexBufferBuilder<TextureVS::PerVertexData> texture_vtx_builder; texture_vtx_builder.AddVertices({ {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 100, 0.0}, {1.0, 0.0}}, // 2 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 800, 0.0}, {0.0, 1.0}}, // 4 }); VertexBufferBuilder<SepiaVS::PerVertexData> sepia_vtx_builder; sepia_vtx_builder.AddVertices({ {{100, 100, 0.0}}, // 1 {{800, 100, 0.0}}, // 2 {{800, 800, 0.0}}, // 3 {{100, 100, 0.0}}, // 1 {{800, 800, 0.0}}, // 3 {{100, 800, 0.0}}, // 4 }); auto boston = CreateTextureForFixture("boston.jpg"); ASSERT_TRUE(boston); const auto& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); SinglePassCallback callback = [&](RenderPass& pass) { auto buffer = HostBuffer::Create(context->GetResourceAllocator()); // Draw the texture. 
{ pass.SetPipeline(texture_pipeline); pass.SetVertexBuffer(texture_vtx_builder.CreateVertexBuffer( *context->GetResourceAllocator())); TextureVS::UniformBuffer uniforms; uniforms.mvp = Matrix::MakeOrthographic(pass.GetRenderTargetSize()) * Matrix::MakeScale(GetContentScale()); TextureVS::BindUniformBuffer(pass, buffer->EmplaceUniform(uniforms)); TextureFS::BindTextureContents(pass, boston, sampler); if (!pass.Draw().ok()) { return false; } } // Draw the sepia toner. { pass.SetPipeline(sepia_pipeline); pass.SetVertexBuffer(sepia_vtx_builder.CreateVertexBuffer( *context->GetResourceAllocator())); SepiaVS::UniformBuffer uniforms; uniforms.mvp = Matrix::MakeOrthographic(pass.GetRenderTargetSize()) * Matrix::MakeScale(GetContentScale()); SepiaVS::BindUniformBuffer(pass, buffer->EmplaceUniform(uniforms)); if (!pass.Draw().ok()) { return false; } } return true; }; OpenPlaygroundHere(callback); } TEST_P(RendererTest, CanSepiaToneThenSwizzleWithSubpasses) { // The GLES framebuffer fetch implementation currently does not support this. // TODO(chinmaygarde): revisit after the GLES framebuffer fetch capabilities // are clarified. if (GetParam() == PlaygroundBackend::kOpenGLES) { GTEST_SKIP_("Not supported on GLES."); } // Define shader types using TextureVS = TextureVertexShader; using TextureFS = TextureFragmentShader; using SwizzleVS = SepiaVertexShader; using SwizzleFS = SwizzleFragmentShader; using SepiaVS = SepiaVertexShader; using SepiaFS = SepiaFragmentShader; auto context = GetContext(); ASSERT_TRUE(context); if (!context->GetCapabilities()->SupportsFramebufferFetch()) { GTEST_SKIP_( "This test uses framebuffer fetch and the backend doesn't support it."); return; } // Create pipelines. auto texture_pipeline = CreateDefaultPipeline<TextureVS, TextureFS>(context); auto swizzle_pipeline = CreateDefaultPipeline<SwizzleVS, SwizzleFS>(context); auto sepia_pipeline = CreateDefaultPipeline<SepiaVS, SepiaFS>(context); ASSERT_TRUE(texture_pipeline); ASSERT_TRUE(swizzle_pipeline); ASSERT_TRUE(sepia_pipeline); // Vertex buffer builders. VertexBufferBuilder<TextureVS::PerVertexData> texture_vtx_builder; texture_vtx_builder.AddVertices({ {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 100, 0.0}, {1.0, 0.0}}, // 2 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 100, 0.0}, {0.0, 0.0}}, // 1 {{800, 800, 0.0}, {1.0, 1.0}}, // 3 {{100, 800, 0.0}, {0.0, 1.0}}, // 4 }); VertexBufferBuilder<SepiaVS::PerVertexData> sepia_vtx_builder; sepia_vtx_builder.AddVertices({ {{100, 100, 0.0}}, // 1 {{800, 100, 0.0}}, // 2 {{800, 800, 0.0}}, // 3 {{100, 100, 0.0}}, // 1 {{800, 800, 0.0}}, // 3 {{100, 800, 0.0}}, // 4 }); auto boston = CreateTextureForFixture("boston.jpg"); ASSERT_TRUE(boston); const auto& sampler = context->GetSamplerLibrary()->GetSampler({}); ASSERT_TRUE(sampler); SinglePassCallback callback = [&](RenderPass& pass) { auto buffer = HostBuffer::Create(context->GetResourceAllocator()); // Draw the texture. { pass.SetPipeline(texture_pipeline); pass.SetVertexBuffer(texture_vtx_builder.CreateVertexBuffer( *context->GetResourceAllocator())); TextureVS::UniformBuffer uniforms; uniforms.mvp = Matrix::MakeOrthographic(pass.GetRenderTargetSize()) * Matrix::MakeScale(GetContentScale()); TextureVS::BindUniformBuffer(pass, buffer->EmplaceUniform(uniforms)); TextureFS::BindTextureContents(pass, boston, sampler); if (!pass.Draw().ok()) { return false; } } // Draw the sepia toner. 
{ pass.SetPipeline(sepia_pipeline); pass.SetVertexBuffer(sepia_vtx_builder.CreateVertexBuffer( *context->GetResourceAllocator())); SepiaVS::UniformBuffer uniforms; uniforms.mvp = Matrix::MakeOrthographic(pass.GetRenderTargetSize()) * Matrix::MakeScale(GetContentScale()); SepiaVS::BindUniformBuffer(pass, buffer->EmplaceUniform(uniforms)); if (!pass.Draw().ok()) { return false; } } // Draw the swizzle. { pass.SetPipeline(swizzle_pipeline); pass.SetVertexBuffer(sepia_vtx_builder.CreateVertexBuffer( *context->GetResourceAllocator())); SwizzleVS::UniformBuffer uniforms; uniforms.mvp = Matrix::MakeOrthographic(pass.GetRenderTargetSize()) * Matrix::MakeScale(GetContentScale()); SwizzleVS::BindUniformBuffer(pass, buffer->EmplaceUniform(uniforms)); if (!pass.Draw().ok()) { return false; } } return true; }; OpenPlaygroundHere(callback); } } // namespace testing } // namespace impeller // NOLINTEND(bugprone-unchecked-optional-access)
engine/impeller/renderer/renderer_unittests.cc/0
{ "file_path": "engine/impeller/renderer/renderer_unittests.cc", "repo_id": "engine", "token_count": 22574 }
215
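The playground tests above all follow the same encode-per-frame shape. As a condensed illustration (not part of the original test suite), here is a hypothetical minimal test built only from helpers and calls that already appear in that file; the test name is made up.

// Hypothetical minimal playground test following the same structure.
TEST_P(RendererTest, MinimalTexturedQuadSketch) {
  using VS = TextureVertexShader;
  using FS = TextureFragmentShader;

  auto context = GetContext();
  ASSERT_TRUE(context);
  auto pipeline = CreateDefaultPipeline<VS, FS>(context);
  ASSERT_TRUE(pipeline);

  VertexBufferBuilder<VS::PerVertexData> vtx_builder;
  vtx_builder.AddVertices({
      {{100, 100, 0.0}, {0.0, 0.0}},
      {{800, 100, 0.0}, {1.0, 0.0}},
      {{800, 800, 0.0}, {1.0, 1.0}},
  });

  auto boston = CreateTextureForFixture("boston.jpg");
  ASSERT_TRUE(boston);
  const auto& sampler = context->GetSamplerLibrary()->GetSampler({});

  SinglePassCallback callback = [&](RenderPass& pass) {
    auto buffer = HostBuffer::Create(context->GetResourceAllocator());
    pass.SetPipeline(pipeline);
    pass.SetVertexBuffer(
        vtx_builder.CreateVertexBuffer(*context->GetResourceAllocator()));
    VS::UniformBuffer uniforms;
    uniforms.mvp = Matrix::MakeOrthographic(pass.GetRenderTargetSize()) *
                   Matrix::MakeScale(GetContentScale());
    VS::BindUniformBuffer(pass, buffer->EmplaceUniform(uniforms));
    FS::BindTextureContents(pass, boston, sampler);
    return pass.Draw().ok();
  };
  OpenPlaygroundHere(callback);
}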
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef FLUTTER_IMPELLER_RENDERER_TEXTURE_MIPMAP_H_
#define FLUTTER_IMPELLER_RENDERER_TEXTURE_MIPMAP_H_

#include "flutter/fml/status.h"
#include "impeller/core/texture.h"
#include "impeller/renderer/command_buffer.h"
#include "impeller/renderer/context.h"

namespace impeller {

/// Encodes a blit pass on `command_buffer` that generates the mipmaps for the
/// given texture.
[[nodiscard]] fml::Status AddMipmapGeneration(
    const std::shared_ptr<CommandBuffer>& command_buffer,
    const std::shared_ptr<Context>& context,
    const std::shared_ptr<Texture>& texture);

}  // namespace impeller

#endif  // FLUTTER_IMPELLER_RENDERER_TEXTURE_MIPMAP_H_
engine/impeller/renderer/texture_mipmap.h/0
{ "file_path": "engine/impeller/renderer/texture_mipmap.h", "repo_id": "engine", "token_count": 279 }
216
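A hedged usage sketch for the helper declared above. The GenerateMips wrapper is hypothetical; only AddMipmapGeneration itself plus the CreateCommandBuffer and GetCommandQueue()->Submit calls seen elsewhere in this corpus are assumed to exist.

#include "impeller/renderer/texture_mipmap.h"

// Hypothetical call site: generate mipmaps for `texture` before sampling it.
// Assumes `texture` was allocated with more than one mip level.
bool GenerateMips(const std::shared_ptr<impeller::Context>& context,
                  const std::shared_ptr<impeller::Texture>& texture) {
  auto command_buffer = context->CreateCommandBuffer();
  if (!command_buffer) {
    return false;
  }
  // Encodes the blit work that fills in the texture's mip levels.
  fml::Status status =
      impeller::AddMipmapGeneration(command_buffer, context, texture);
  if (!status.ok()) {
    return false;
  }
  // Submit the recorded work to the device.
  return context->GetCommandQueue()->Submit({command_buffer}).ok();
}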
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/scene/animation/animation.h" #include <algorithm> #include <cstring> #include <memory> #include <vector> #include "impeller/geometry/quaternion.h" #include "impeller/scene/importer/scene_flatbuffers.h" #include "impeller/scene/node.h" namespace impeller { namespace scene { std::shared_ptr<Animation> Animation::MakeFromFlatbuffer( const fb::Animation& animation, const std::vector<std::shared_ptr<Node>>& scene_nodes) { auto result = std::shared_ptr<Animation>(new Animation()); result->name_ = animation.name()->str(); for (auto channel : *animation.channels()) { if (channel->node() < 0 || static_cast<size_t>(channel->node()) >= scene_nodes.size() || !channel->timeline()) { continue; } Animation::Channel out_channel; out_channel.bind_target.node_name = scene_nodes[channel->node()]->GetName(); auto* times = channel->timeline(); std::vector<Scalar> out_times; out_times.resize(channel->timeline()->size()); std::copy(times->begin(), times->end(), out_times.begin()); // TODO(bdero): Why are the entries in the keyframe value arrays not // contiguous in the flatbuffer? We should be able to get rid // of the subloops below and just memcpy instead. switch (channel->keyframes_type()) { case fb::Keyframes::TranslationKeyframes: { out_channel.bind_target.property = Animation::Property::kTranslation; auto* keyframes = channel->keyframes_as_TranslationKeyframes(); if (!keyframes->values()) { continue; } std::vector<Vector3> out_values; out_values.resize(keyframes->values()->size()); for (size_t value_i = 0; value_i < keyframes->values()->size(); value_i++) { auto val = (*keyframes->values())[value_i]; out_values[value_i] = Vector3(val->x(), val->y(), val->z()); } out_channel.resolver = PropertyResolver::MakeTranslationTimeline( std::move(out_times), std::move(out_values)); break; } case fb::Keyframes::RotationKeyframes: { out_channel.bind_target.property = Animation::Property::kRotation; auto* keyframes = channel->keyframes_as_RotationKeyframes(); if (!keyframes->values()) { continue; } std::vector<Quaternion> out_values; out_values.resize(keyframes->values()->size()); for (size_t value_i = 0; value_i < keyframes->values()->size(); value_i++) { auto val = (*keyframes->values())[value_i]; out_values[value_i] = Quaternion(val->x(), val->y(), val->z(), val->w()); } out_channel.resolver = PropertyResolver::MakeRotationTimeline( std::move(out_times), std::move(out_values)); break; } case fb::Keyframes::ScaleKeyframes: { out_channel.bind_target.property = Animation::Property::kScale; auto* keyframes = channel->keyframes_as_ScaleKeyframes(); if (!keyframes->values()) { continue; } std::vector<Vector3> out_values; out_values.resize(keyframes->values()->size()); for (size_t value_i = 0; value_i < keyframes->values()->size(); value_i++) { auto val = (*keyframes->values())[value_i]; out_values[value_i] = Vector3(val->x(), val->y(), val->z()); } out_channel.resolver = PropertyResolver::MakeScaleTimeline( std::move(out_times), std::move(out_values)); break; } case fb::Keyframes::NONE: continue; } result->end_time_ = std::max(result->end_time_, out_channel.resolver->GetEndTime()); result->channels_.push_back(std::move(out_channel)); } return result; } Animation::Animation() = default; Animation::~Animation() = default; const std::string& Animation::GetName() const { return name_; } const std::vector<Animation::Channel>& 
Animation::GetChannels() const { return channels_; } SecondsF Animation::GetEndTime() const { return end_time_; } } // namespace scene } // namespace impeller
engine/impeller/scene/animation/animation.cc/0
{ "file_path": "engine/impeller/scene/animation/animation.cc", "repo_id": "engine", "token_count": 1757 }
217
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_SCENE_IMPORTER_IMPORTER_H_ #define FLUTTER_IMPELLER_SCENE_IMPORTER_IMPORTER_H_ #include <array> #include <memory> #include "flutter/fml/mapping.h" #include "impeller/scene/importer/scene_flatbuffers.h" namespace impeller { namespace scene { namespace importer { bool ParseGLTF(const fml::Mapping& source_mapping, fb::SceneT& out_scene); } } // namespace scene } // namespace impeller #endif // FLUTTER_IMPELLER_SCENE_IMPORTER_IMPORTER_H_
engine/impeller/scene/importer/importer.h/0
{ "file_path": "engine/impeller/scene/importer/importer.h", "repo_id": "engine", "token_count": 235 }
218
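A small sketch of how the ParseGLTF entry point above might be driven; the LoadScene wrapper is hypothetical, and fml::NonOwnedMapping (used elsewhere in this corpus) is assumed to be a suitable non-copying Mapping over caller-owned bytes.

#include "flutter/fml/mapping.h"
#include "impeller/scene/importer/importer.h"

namespace impeller::scene::importer {

// Hypothetical helper: parse in-memory glTF bytes into the mutable flatbuffer
// scene object (fb::SceneT). The caller keeps `gltf_bytes` alive for the call.
bool LoadScene(const uint8_t* gltf_bytes,
               size_t length,
               fb::SceneT& out_scene) {
  // NonOwnedMapping views the caller's buffer without copying it.
  fml::NonOwnedMapping mapping(gltf_bytes, length);
  return ParseGLTF(mapping, out_scene);
}

}  // namespace impeller::scene::importer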
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_SCENE_PIPELINE_KEY_H_ #define FLUTTER_IMPELLER_SCENE_PIPELINE_KEY_H_ #include "flutter/fml/hash_combine.h" namespace impeller { namespace scene { enum class GeometryType { kUnskinned = 0, kSkinned = 1, kLastType = kSkinned, }; enum class MaterialType { kUnlit = 0, kLastType = kUnlit, }; struct PipelineKey { GeometryType geometry_type = GeometryType::kUnskinned; MaterialType material_type = MaterialType::kUnlit; struct Hash { constexpr std::size_t operator()(const PipelineKey& o) const { return fml::HashCombine(o.geometry_type, o.material_type); } }; struct Equal { constexpr bool operator()(const PipelineKey& lhs, const PipelineKey& rhs) const { return lhs.geometry_type == rhs.geometry_type && lhs.material_type == rhs.material_type; } }; }; } // namespace scene } // namespace impeller #endif // FLUTTER_IMPELLER_SCENE_PIPELINE_KEY_H_
engine/impeller/scene/pipeline_key.h/0
{ "file_path": "engine/impeller/scene/pipeline_key.h", "repo_id": "engine", "token_count": 445 }
219
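The nested Hash and Equal functors above exist so PipelineKey can be used as a hash-map key; the cache below is a hypothetical illustration (the string value type is an arbitrary placeholder).

#include <string>
#include <unordered_map>

#include "impeller/scene/pipeline_key.h"

namespace impeller {
namespace scene {

// Hypothetical variant cache keyed by (geometry type, material type).
using PipelineLabelMap = std::unordered_map<PipelineKey,
                                            std::string,
                                            PipelineKey::Hash,
                                            PipelineKey::Equal>;

void Example() {
  PipelineLabelMap labels;
  PipelineKey key;
  key.geometry_type = GeometryType::kSkinned;
  key.material_type = MaterialType::kUnlit;
  // Hash combines both enum fields via fml::HashCombine; Equal compares them.
  labels[key] = "skinned_unlit";
}

}  // namespace scene
}  // namespace impeller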
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. namespace impeller.fb; enum RenderingBackend:byte { kOpenGLES, kVulkan, kMetal, } table ShaderArchiveBlob { rendering_backend: RenderingBackend; mapping: [ubyte]; } table MultiArchShaderArchive { // We could have just as easily used the existing `ShaderArchive` table here. // However, those tables aren't used by Metal. items: [ShaderArchiveBlob]; } root_type MultiArchShaderArchive; file_identifier "MARC";
engine/impeller/shader_archive/multi_arch_shader_archive.fbs/0
{ "file_path": "engine/impeller/shader_archive/multi_arch_shader_archive.fbs", "repo_id": "engine", "token_count": 192 }
220
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_IMPELLER_TESSELLATOR_C_TESSELLATOR_H_ #define FLUTTER_IMPELLER_TESSELLATOR_C_TESSELLATOR_H_ #include <cstdint> #include "impeller/geometry/path_builder.h" #include "impeller/tessellator/tessellator.h" #ifdef _WIN32 #define IMPELLER_API __declspec(dllexport) #else #define IMPELLER_API __attribute__((visibility("default"))) #endif extern "C" { namespace impeller { struct IMPELLER_API Vertices { float* points; uint32_t length; }; IMPELLER_API PathBuilder* CreatePathBuilder(); IMPELLER_API void DestroyPathBuilder(PathBuilder* builder); IMPELLER_API void MoveTo(PathBuilder* builder, Scalar x, Scalar y); IMPELLER_API void LineTo(PathBuilder* builder, Scalar x, Scalar y); IMPELLER_API void CubicTo(PathBuilder* builder, Scalar x1, Scalar y1, Scalar x2, Scalar y2, Scalar x3, Scalar y3); IMPELLER_API void Close(PathBuilder* builder); IMPELLER_API struct Vertices* Tessellate(PathBuilder* builder, int fill_type, Scalar tolerance); IMPELLER_API void DestroyVertices(Vertices* vertices); } // namespace impeller } #endif // FLUTTER_IMPELLER_TESSELLATOR_C_TESSELLATOR_H_
engine/impeller/tessellator/c/tessellator.h/0
{ "file_path": "engine/impeller/tessellator/c/tessellator.h", "repo_id": "engine", "token_count": 710 }
221
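Because the header above exposes a C-style API (e.g. for use over dart:ffi), a hypothetical call-sequence sketch may help; the fill-type value 0 and the tolerance below are illustrative assumptions, not documented constants.

#include "impeller/tessellator/c/tessellator.h"

// Hypothetical driver: build a triangle path and tessellate it.
void TessellateTriangleSketch() {
  impeller::PathBuilder* builder = impeller::CreatePathBuilder();
  impeller::MoveTo(builder, 10.0f, 10.0f);
  impeller::LineTo(builder, 100.0f, 10.0f);
  impeller::LineTo(builder, 55.0f, 90.0f);
  impeller::Close(builder);

  // fill_type and tolerance are forwarded to the tessellator; 0 and 0.1f are
  // placeholder values here.
  impeller::Vertices* vertices =
      impeller::Tessellate(builder, /*fill_type=*/0, /*tolerance=*/0.1f);
  if (vertices != nullptr) {
    // `points` holds `length` floats; interpreting them as packed positions
    // is an assumption here.
    impeller::DestroyVertices(vertices);
  }
  impeller::DestroyPathBuilder(builder);
}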
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/toolkit/android/surface_control.h" #include "impeller/base/validation.h" #include "impeller/toolkit/android/surface_transaction.h" namespace impeller::android { SurfaceControl::SurfaceControl(ANativeWindow* window, const char* debug_name) { if (window == nullptr) { VALIDATION_LOG << "Parent window of surface was null."; return; } if (debug_name == nullptr) { debug_name = "Impeller Layer"; } control_.reset( GetProcTable().ASurfaceControl_createFromWindow(window, debug_name)); } SurfaceControl::~SurfaceControl() { if (IsValid() && !RemoveFromParent()) { VALIDATION_LOG << "Surface control could not be removed from its parent. " "Expect a leak."; } } bool SurfaceControl::IsValid() const { return control_.is_valid(); } ASurfaceControl* SurfaceControl::GetHandle() const { return control_.get(); } bool SurfaceControl::RemoveFromParent() const { if (!IsValid()) { return false; } SurfaceTransaction transaction; if (!transaction.SetParent(*this, nullptr)) { return false; } return transaction.Apply(); } bool SurfaceControl::IsAvailableOnPlatform() { return GetProcTable().IsValid() && GetProcTable().ASurfaceControl_createFromWindow.IsAvailable(); } } // namespace impeller::android
engine/impeller/toolkit/android/surface_control.cc/0
{ "file_path": "engine/impeller/toolkit/android/surface_control.cc", "repo_id": "engine", "token_count": 493 }
222
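A hedged sketch of the expected call pattern for the class above: availability is checked first because the NDK symbols are resolved at runtime. The AttachControl function and its window argument are hypothetical.

#include "impeller/toolkit/android/surface_control.h"

// Hypothetical call site in Android-specific embedder code.
bool AttachControl(ANativeWindow* window) {
  using impeller::android::SurfaceControl;
  if (!SurfaceControl::IsAvailableOnPlatform()) {
    return false;  // ASurfaceControl_createFromWindow is not available.
  }
  SurfaceControl control(window, "Impeller Layer");
  if (!control.IsValid()) {
    return false;  // Null window or failed NDK call.
  }
  // Hand control.GetHandle() to a SurfaceTransaction here; note that the
  // destructor detaches the control from its parent when `control` goes out
  // of scope.
  return true;
}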
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/toolkit/egl/surface.h" namespace impeller { namespace egl { Surface::Surface(EGLDisplay display, EGLSurface surface) : display_(display), surface_(surface) {} Surface::~Surface() { if (surface_ != EGL_NO_SURFACE) { if (::eglDestroySurface(display_, surface_) != EGL_TRUE) { IMPELLER_LOG_EGL_ERROR; } } } const EGLSurface& Surface::GetHandle() const { return surface_; } bool Surface::IsValid() const { return surface_ != EGL_NO_SURFACE; } bool Surface::Present() const { const auto result = ::eglSwapBuffers(display_, surface_) == EGL_TRUE; if (!result) { IMPELLER_LOG_EGL_ERROR; } return result; } } // namespace egl } // namespace impeller
engine/impeller/toolkit/egl/surface.cc/0
{ "file_path": "engine/impeller/toolkit/egl/surface.cc", "repo_id": "engine", "token_count": 320 }
223
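A short sketch of how the wrapper above is meant to be used per frame; obtaining the EGLDisplay/EGLSurface pair from the surrounding EGL setup code is assumed and not shown.

#include "impeller/toolkit/egl/surface.h"

// Hypothetical per-frame presentation step.
bool PresentFrame(const impeller::egl::Surface& surface) {
  if (!surface.IsValid()) {
    return false;  // EGL_NO_SURFACE, nothing to swap.
  }
  // Present() wraps eglSwapBuffers and logs the EGL error on failure.
  return surface.Present();
}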
# Copyright 2013 The Flutter Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import("//flutter/impeller/tools/impeller.gni") impeller_component("typographer_skia_backend") { sources = [ "glyph_atlas_context_skia.cc", "glyph_atlas_context_skia.h", "text_frame_skia.cc", "text_frame_skia.h", "typeface_skia.cc", "typeface_skia.h", "typographer_context_skia.cc", "typographer_context_skia.h", ] public_deps = [ "//flutter/impeller/typographer", "//flutter/skia", ] }
engine/impeller/typographer/backends/skia/BUILD.gn/0
{ "file_path": "engine/impeller/typographer/backends/skia/BUILD.gn", "repo_id": "engine", "token_count": 249 }
224
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/typographer/backends/stb/typographer_context_stb.h" #include <numeric> #include <utility> #include "flutter/fml/logging.h" #include "flutter/fml/trace_event.h" #include "impeller/base/allocation.h" #include "impeller/core/allocator.h" #include "impeller/typographer/backends/stb/glyph_atlas_context_stb.h" #include "impeller/typographer/font_glyph_pair.h" #include "typeface_stb.h" #define DISABLE_COLOR_FONT_SUPPORT 1 #ifdef DISABLE_COLOR_FONT_SUPPORT constexpr auto kColorFontBitsPerPixel = 1; #else constexpr auto kColorFontBitsPerPixel = 4; #endif namespace impeller { constexpr size_t kPadding = 1; std::unique_ptr<TypographerContext> TypographerContextSTB::Make() { return std::make_unique<TypographerContextSTB>(); } TypographerContextSTB::TypographerContextSTB() : TypographerContext() {} TypographerContextSTB::~TypographerContextSTB() = default; std::shared_ptr<GlyphAtlasContext> TypographerContextSTB::CreateGlyphAtlasContext() const { return std::make_shared<GlyphAtlasContextSTB>(); } // Function returns the count of "remaining pairs" not packed into rect of given // size. static size_t PairsFitInAtlasOfSize( const std::vector<FontGlyphPair>& pairs, const ISize& atlas_size, std::vector<Rect>& glyph_positions, const std::shared_ptr<RectanglePacker>& rect_packer) { if (atlas_size.IsEmpty()) { return false; } glyph_positions.clear(); glyph_positions.reserve(pairs.size()); size_t i = 0; for (auto it = pairs.begin(); it != pairs.end(); ++i, ++it) { const auto& pair = *it; const Font& font = pair.scaled_font.font; // We downcast to the correct typeface type to access `stb` specific // methods. std::shared_ptr<TypefaceSTB> typeface_stb = std::reinterpret_pointer_cast<TypefaceSTB>(font.GetTypeface()); // Conversion factor to scale font size in Points to pixels. // Note this assumes typical DPI. float text_size_pixels = font.GetMetrics().point_size * TypefaceSTB::kPointsToPixels; ISize glyph_size; { int x0 = 0, y0 = 0, x1 = 0, y1 = 0; // NOTE: We increase the size of the glyph by one pixel in all dimensions // to allow us to cut out padding later. float scale = stbtt_ScaleForPixelHeight(typeface_stb->GetFontInfo(), text_size_pixels); stbtt_GetGlyphBitmapBox(typeface_stb->GetFontInfo(), pair.glyph.index, scale, scale, &x0, &y0, &x1, &y1); glyph_size = ISize(x1 - x0, y1 - y0); } IPoint16 location_in_atlas; if (!rect_packer->addRect(glyph_size.width + kPadding, // glyph_size.height + kPadding, // &location_in_atlas // )) { return pairs.size() - i; } glyph_positions.emplace_back(Rect::MakeXYWH(location_in_atlas.x(), // location_in_atlas.y(), // glyph_size.width, // glyph_size.height // )); } return 0; } static bool CanAppendToExistingAtlas( const std::shared_ptr<GlyphAtlas>& atlas, const std::vector<FontGlyphPair>& extra_pairs, std::vector<Rect>& glyph_positions, ISize atlas_size, const std::shared_ptr<RectanglePacker>& rect_packer) { TRACE_EVENT0("impeller", __FUNCTION__); if (!rect_packer || atlas_size.IsEmpty()) { return false; } // We assume that all existing glyphs will fit. After all, they fit before. // The glyph_positions only contains the values for the additional glyphs // from extra_pairs. 
FML_DCHECK(glyph_positions.size() == 0); glyph_positions.reserve(extra_pairs.size()); for (size_t i = 0; i < extra_pairs.size(); i++) { const FontGlyphPair& pair = extra_pairs[i]; const Font& font = pair.scaled_font.font; // We downcast to the correct typeface type to access `stb` specific methods std::shared_ptr<TypefaceSTB> typeface_stb = std::reinterpret_pointer_cast<TypefaceSTB>(font.GetTypeface()); // Conversion factor to scale font size in Points to pixels. // Note this assumes typical DPI. float text_size_pixels = font.GetMetrics().point_size * TypefaceSTB::kPointsToPixels; ISize glyph_size; { int x0 = 0, y0 = 0, x1 = 0, y1 = 0; // NOTE: We increase the size of the glyph by one pixel in all dimensions // to allow us to cut out padding later. float scale_y = stbtt_ScaleForPixelHeight(typeface_stb->GetFontInfo(), text_size_pixels); float scale_x = scale_y; stbtt_GetGlyphBitmapBox(typeface_stb->GetFontInfo(), pair.glyph.index, scale_x, scale_y, &x0, &y0, &x1, &y1); glyph_size = ISize(x1 - x0, y1 - y0); } IPoint16 location_in_atlas; if (!rect_packer->addRect(glyph_size.width + kPadding, // glyph_size.height + kPadding, // &location_in_atlas // )) { return false; } glyph_positions.emplace_back(Rect::MakeXYWH(location_in_atlas.x(), // location_in_atlas.y(), // glyph_size.width, // glyph_size.height // )); } return true; } static ISize OptimumAtlasSizeForFontGlyphPairs( const std::vector<FontGlyphPair>& pairs, std::vector<Rect>& glyph_positions, const std::shared_ptr<GlyphAtlasContext>& atlas_context, GlyphAtlas::Type type, const ISize& max_texture_size) { static constexpr auto kMinAtlasSize = 8u; static constexpr auto kMinAlphaBitmapSize = 1024u; TRACE_EVENT0("impeller", __FUNCTION__); ISize current_size = type == GlyphAtlas::Type::kAlphaBitmap ? ISize(kMinAlphaBitmapSize, kMinAlphaBitmapSize) : ISize(kMinAtlasSize, kMinAtlasSize); size_t total_pairs = pairs.size() + 1; do { auto rect_packer = std::shared_ptr<RectanglePacker>( RectanglePacker::Factory(current_size.width, current_size.height)); auto remaining_pairs = PairsFitInAtlasOfSize(pairs, current_size, glyph_positions, rect_packer); if (remaining_pairs == 0) { atlas_context->UpdateRectPacker(rect_packer); return current_size; } else if (remaining_pairs < std::ceil(total_pairs / 2)) { current_size = ISize::MakeWH( std::max(current_size.width, current_size.height), Allocation::NextPowerOfTwoSize( std::min(current_size.width, current_size.height) + 1)); } else { current_size = ISize::MakeWH( Allocation::NextPowerOfTwoSize(current_size.width + 1), Allocation::NextPowerOfTwoSize(current_size.height + 1)); } } while (current_size.width <= max_texture_size.width && current_size.height <= max_texture_size.height); return ISize{0, 0}; } static void DrawGlyph(BitmapSTB* bitmap, const ScaledFont& scaled_font, const Glyph& glyph, const Rect& location, bool has_color) { const auto& metrics = scaled_font.font.GetMetrics(); const impeller::Font& font = scaled_font.font; auto typeface = font.GetTypeface(); // We downcast to the correct typeface type to access `stb` specific methods std::shared_ptr<TypefaceSTB> typeface_stb = std::reinterpret_pointer_cast<TypefaceSTB>(typeface); // Conversion factor to scale font size in Points to pixels. // Note this assumes typical DPI. 
float text_size_pixels = metrics.point_size * TypefaceSTB::kPointsToPixels; float scale_y = stbtt_ScaleForPixelHeight(typeface_stb->GetFontInfo(), text_size_pixels); float scale_x = scale_y; auto output = bitmap->GetPixelAddress({static_cast<size_t>(location.GetX()), static_cast<size_t>(location.GetY())}); // For Alpha and Signed Distance field bitmaps we can use STB to draw the // Glyph in place if (!has_color || DISABLE_COLOR_FONT_SUPPORT) { stbtt_MakeGlyphBitmap(typeface_stb->GetFontInfo(), output, location.GetWidth() - kPadding, location.GetHeight() - kPadding, bitmap->GetRowBytes(), scale_x, scale_y, glyph.index); } else { // But for color bitmaps we need to get the glyph pixels and then carry all // channels into the atlas bitmap. This may not be performant but I'm unsure // of any other approach currently. int glyph_bitmap_width = 0; int glyph_bitmap_height = 0; int glyph_bitmap_xoff = 0; int glyph_bitmap_yoff = 0; auto glyph_pixels = stbtt_GetGlyphBitmap( typeface_stb->GetFontInfo(), scale_x, scale_y, glyph.index, &glyph_bitmap_width, &glyph_bitmap_height, &glyph_bitmap_xoff, &glyph_bitmap_yoff); uint8_t* write_pos = output; for (auto y = 0; y < glyph_bitmap_height; ++y) { for (auto x = 0; x < glyph_bitmap_width; ++x) { // Color bitmaps write as White (i.e. what is 0 in an alpha bitmap is // 255 in a color bitmap) But not alpha. Alpha still carries // transparency info in the normal way. // There's some issue with color fonts, in that if the pixel color is // nonzero, the alpha is ignored during rendering. That is, partially // (or fully) transparent pixels with nonzero color are rendered as // fully opaque. uint8_t a = glyph_pixels[x + y * glyph_bitmap_width]; uint8_t c = 255 - a; // Red channel *write_pos = c; write_pos++; // Green channel *write_pos = c; write_pos++; // Blue channel *write_pos = c; write_pos++; // Alpha channel *write_pos = a; write_pos++; } // next row write_pos = output + (y * bitmap->GetRowBytes()); } stbtt_FreeBitmap(glyph_pixels, nullptr); } } static bool UpdateAtlasBitmap(const GlyphAtlas& atlas, const std::shared_ptr<BitmapSTB>& bitmap, const std::vector<FontGlyphPair>& new_pairs) { TRACE_EVENT0("impeller", __FUNCTION__); FML_DCHECK(bitmap != nullptr); bool has_color = atlas.GetType() == GlyphAtlas::Type::kColorBitmap; for (const FontGlyphPair& pair : new_pairs) { auto pos = atlas.FindFontGlyphBounds(pair); if (!pos.has_value()) { continue; } DrawGlyph(bitmap.get(), pair.scaled_font, pair.glyph, pos.value(), has_color); } return true; } static std::shared_ptr<BitmapSTB> CreateAtlasBitmap(const GlyphAtlas& atlas, const ISize& atlas_size) { TRACE_EVENT0("impeller", __FUNCTION__); size_t bytes_per_pixel = 1; if (atlas.GetType() == GlyphAtlas::Type::kColorBitmap && !DISABLE_COLOR_FONT_SUPPORT) { bytes_per_pixel = kColorFontBitsPerPixel; } auto bitmap = std::make_shared<BitmapSTB>(atlas_size.width, atlas_size.height, bytes_per_pixel); bool has_color = atlas.GetType() == GlyphAtlas::Type::kColorBitmap; atlas.IterateGlyphs([&bitmap, has_color](const ScaledFont& scaled_font, const Glyph& glyph, const Rect& location) -> bool { DrawGlyph(bitmap.get(), scaled_font, glyph, location, has_color); return true; }); return bitmap; } // static bool UpdateGlyphTextureAtlas(std::shared_ptr<SkBitmap> bitmap, static bool UpdateGlyphTextureAtlas(std::shared_ptr<BitmapSTB>& bitmap, const std::shared_ptr<Texture>& texture) { TRACE_EVENT0("impeller", __FUNCTION__); FML_DCHECK(bitmap != nullptr); auto texture_descriptor = texture->GetTextureDescriptor(); auto mapping = 
std::make_shared<fml::NonOwnedMapping>( reinterpret_cast<const uint8_t*>(bitmap->GetPixels()), // data texture_descriptor.GetByteSizeOfBaseMipLevel() // size // As the bitmap is static in this module I believe we don't need to // specify a release proc. ); return texture->SetContents(mapping); } static std::shared_ptr<Texture> UploadGlyphTextureAtlas( const std::shared_ptr<Allocator>& allocator, std::shared_ptr<BitmapSTB>& bitmap, const ISize& atlas_size, PixelFormat format) { TRACE_EVENT0("impeller", __FUNCTION__); if (!allocator) { return nullptr; } FML_DCHECK(bitmap != nullptr); TextureDescriptor texture_descriptor; texture_descriptor.storage_mode = StorageMode::kHostVisible; texture_descriptor.format = format; texture_descriptor.size = atlas_size; if (bitmap->GetRowBytes() * bitmap->GetHeight() != texture_descriptor.GetByteSizeOfBaseMipLevel()) { return nullptr; } auto texture = allocator->CreateTexture(texture_descriptor); if (!texture || !texture->IsValid()) { return nullptr; } texture->SetLabel("GlyphAtlas"); auto mapping = std::make_shared<fml::NonOwnedMapping>( reinterpret_cast<const uint8_t*>(bitmap->GetPixels()), // data texture_descriptor.GetByteSizeOfBaseMipLevel() // size // As the bitmap is static in this module I believe we don't need to // specify a release proc. ); if (!texture->SetContents(mapping)) { return nullptr; } return texture; } std::shared_ptr<GlyphAtlas> TypographerContextSTB::CreateGlyphAtlas( Context& context, GlyphAtlas::Type type, const std::shared_ptr<GlyphAtlasContext>& atlas_context, const FontGlyphMap& font_glyph_map) const { TRACE_EVENT0("impeller", __FUNCTION__); if (!IsValid()) { return nullptr; } auto& atlas_context_stb = GlyphAtlasContextSTB::Cast(*atlas_context); std::shared_ptr<GlyphAtlas> last_atlas = atlas_context->GetGlyphAtlas(); if (font_glyph_map.empty()) { return last_atlas; } // --------------------------------------------------------------------------- // Step 1: Determine if the atlas type and font glyph pairs are compatible // with the current atlas and reuse if possible. // --------------------------------------------------------------------------- std::vector<FontGlyphPair> new_glyphs; for (const auto& font_value : font_glyph_map) { const ScaledFont& scaled_font = font_value.first; const FontGlyphAtlas* font_glyph_atlas = last_atlas->GetFontGlyphAtlas(scaled_font.font, scaled_font.scale); if (font_glyph_atlas) { for (const Glyph& glyph : font_value.second) { if (!font_glyph_atlas->FindGlyphBounds(glyph)) { new_glyphs.emplace_back(scaled_font, glyph); } } } else { for (const Glyph& glyph : font_value.second) { new_glyphs.emplace_back(scaled_font, glyph); } } } if (last_atlas->GetType() == type && new_glyphs.size() == 0) { return last_atlas; } // --------------------------------------------------------------------------- // Step 2: Determine if the additional missing glyphs can be appended to the // existing bitmap without recreating the atlas. This requires that // the type is identical. // --------------------------------------------------------------------------- std::vector<Rect> glyph_positions; if (last_atlas->GetType() == type && CanAppendToExistingAtlas(last_atlas, new_glyphs, glyph_positions, atlas_context->GetAtlasSize(), atlas_context->GetRectPacker())) { // The old bitmap will be reused and only the additional glyphs will be // added. // --------------------------------------------------------------------------- // Step 3a: Record the positions in the glyph atlas of the newly added // glyphs. 
// --------------------------------------------------------------------------- for (size_t i = 0, count = glyph_positions.size(); i < count; i++) { last_atlas->AddTypefaceGlyphPosition(new_glyphs[i], glyph_positions[i]); } // --------------------------------------------------------------------------- // Step 4a: Draw new font-glyph pairs into the existing bitmap. // --------------------------------------------------------------------------- // auto bitmap = atlas_context->GetBitmap(); auto bitmap = atlas_context_stb.GetBitmap(); if (!UpdateAtlasBitmap(*last_atlas, bitmap, new_glyphs)) { return nullptr; } // --------------------------------------------------------------------------- // Step 5a: Update the existing texture with the updated bitmap. // --------------------------------------------------------------------------- if (!UpdateGlyphTextureAtlas(bitmap, last_atlas->GetTexture())) { return nullptr; } return last_atlas; } // A new glyph atlas must be created. // --------------------------------------------------------------------------- // Step 3b: Get the optimum size of the texture atlas. // --------------------------------------------------------------------------- std::vector<FontGlyphPair> font_glyph_pairs; font_glyph_pairs.reserve(std::accumulate( font_glyph_map.begin(), font_glyph_map.end(), 0, [](const int a, const auto& b) { return a + b.second.size(); })); for (const auto& font_value : font_glyph_map) { const ScaledFont& scaled_font = font_value.first; for (const Glyph& glyph : font_value.second) { font_glyph_pairs.push_back({scaled_font, glyph}); } } auto glyph_atlas = std::make_shared<GlyphAtlas>(type); auto atlas_size = OptimumAtlasSizeForFontGlyphPairs( font_glyph_pairs, // glyph_positions, // atlas_context, // type, // context.GetResourceAllocator()->GetMaxTextureSizeSupported() // ); atlas_context->UpdateGlyphAtlas(glyph_atlas, atlas_size); if (atlas_size.IsEmpty()) { return nullptr; } // --------------------------------------------------------------------------- // Step 4b: Find location of font-glyph pairs in the atlas. We have this from // the last step. So no need to do create another rect packer. But just do a // sanity check of counts. This could also be just an assertion as only a // construction issue would cause such a failure. // --------------------------------------------------------------------------- if (glyph_positions.size() != font_glyph_pairs.size()) { return nullptr; } // --------------------------------------------------------------------------- // Step 5b: Record the positions in the glyph atlas. // --------------------------------------------------------------------------- { size_t i = 0; for (auto it = font_glyph_pairs.begin(); it != font_glyph_pairs.end(); ++i, ++it) { glyph_atlas->AddTypefaceGlyphPosition(*it, glyph_positions[i]); } } // --------------------------------------------------------------------------- // Step 6b: Draw font-glyph pairs in the correct spot in the atlas. // --------------------------------------------------------------------------- auto bitmap = CreateAtlasBitmap(*glyph_atlas, atlas_size); if (!bitmap) { return nullptr; } atlas_context_stb.UpdateBitmap(bitmap); // --------------------------------------------------------------------------- // Step 7b: Upload the atlas as a texture. 
// --------------------------------------------------------------------------- PixelFormat format; switch (type) { case GlyphAtlas::Type::kAlphaBitmap: format = context.GetCapabilities()->GetDefaultGlyphAtlasFormat(); break; case GlyphAtlas::Type::kColorBitmap: format = DISABLE_COLOR_FONT_SUPPORT ? context.GetCapabilities()->GetDefaultGlyphAtlasFormat() : PixelFormat::kR8G8B8A8UNormInt; break; } auto texture = UploadGlyphTextureAtlas(context.GetResourceAllocator(), bitmap, atlas_size, format); if (!texture) { return nullptr; } // --------------------------------------------------------------------------- // Step 8b: Record the texture in the glyph atlas. // --------------------------------------------------------------------------- glyph_atlas->SetTexture(std::move(texture)); return glyph_atlas; } } // namespace impeller
engine/impeller/typographer/backends/stb/typographer_context_stb.cc/0
{ "file_path": "engine/impeller/typographer/backends/stb/typographer_context_stb.cc", "repo_id": "engine", "token_count": 8644 }
225
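The atlas sizing loop above grows the trial size differently depending on how many glyphs failed to pack. A standalone sketch of just that growth rule follows; GrowAtlas and its nearly_fits flag are hypothetical names, while NextPowerOfTwoSize and ISize::MakeWH are reused from the code above.

#include <algorithm>

#include "impeller/base/allocation.h"
#include "impeller/geometry/size.h"

// Hypothetical helper mirroring the growth step of
// OptimumAtlasSizeForFontGlyphPairs. `nearly_fits` stands in for the
// "fewer than half of the glyphs remain unpacked" test in the original loop.
impeller::ISize GrowAtlas(impeller::ISize current, bool nearly_fits) {
  if (nearly_fits) {
    // Close to fitting: bump only the smaller dimension to the next power of
    // two, keeping the larger one as-is.
    return impeller::ISize::MakeWH(
        std::max(current.width, current.height),
        impeller::Allocation::NextPowerOfTwoSize(
            std::min(current.width, current.height) + 1));
  }
  // Far from fitting: grow both dimensions.
  return impeller::ISize::MakeWH(
      impeller::Allocation::NextPowerOfTwoSize(current.width + 1),
      impeller::Allocation::NextPowerOfTwoSize(current.height + 1));
}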
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "impeller/typographer/text_run.h" namespace impeller { TextRun::TextRun(const Font& font) : font_(font) { if (!font_.IsValid()) { return; } is_valid_ = true; } TextRun::TextRun(const Font& font, std::vector<GlyphPosition>& glyphs) : font_(font), glyphs_(std::move(glyphs)) { if (!font_.IsValid()) { return; } is_valid_ = true; } TextRun::~TextRun() = default; bool TextRun::AddGlyph(Glyph glyph, Point position) { glyphs_.emplace_back(GlyphPosition{glyph, position}); return true; } bool TextRun::IsValid() const { return is_valid_; } const std::vector<TextRun::GlyphPosition>& TextRun::GetGlyphPositions() const { return glyphs_; } size_t TextRun::GetGlyphCount() const { return glyphs_.size(); } const Font& TextRun::GetFont() const { return font_; } } // namespace impeller
engine/impeller/typographer/text_run.cc/0
{ "file_path": "engine/impeller/typographer/text_run.cc", "repo_id": "engine", "token_count": 364 }
226
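A brief hedged sketch of assembling a run with the API above. Font and glyph construction depend on a typeface backend, so both are taken as parameters, and the fixed 10-unit advance is purely illustrative.

#include <vector>

#include "impeller/typographer/text_run.h"

// Hypothetical: place the given glyphs on a horizontal baseline.
bool BuildRunSketch(const impeller::Font& font,
                    const std::vector<impeller::Glyph>& glyphs) {
  impeller::TextRun run(font);
  if (!run.IsValid()) {
    return false;  // An invalid font makes the run invalid.
  }
  impeller::Scalar x = 0;
  for (const impeller::Glyph& glyph : glyphs) {
    // Positions are expressed in the run's local coordinate space.
    run.AddGlyph(glyph, impeller::Point(x, 0));
    x += 10;  // Placeholder advance; real advances come from text shaping.
  }
  return run.GetGlyphCount() == glyphs.size();
}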
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/fml/build_config.h" #ifndef FLUTTER_LIB_GPU_EXPORT_H_ #define FLUTTER_LIB_GPU_EXPORT_H_ #if FML_OS_WIN #define FLUTTER_GPU_EXPORT __declspec(dllexport) #else // FML_OS_WIN #define FLUTTER_GPU_EXPORT __attribute__((visibility("default"))) #endif // FML_OS_WIN #endif // FLUTTER_LIB_GPU_EXPORT_H_
engine/lib/gpu/export.h/0
{ "file_path": "engine/lib/gpu/export.h", "repo_id": "engine", "token_count": 180 }
227
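A small sketch of applying the macro above to an exported symbol; the function name is a made-up placeholder rather than a real Flutter GPU entry point.

#include "flutter/lib/gpu/export.h"

// Hypothetical exported C entry point: dllexport-ed on Windows and given
// default visibility elsewhere, so the Dart FFI layer can resolve it.
extern "C" FLUTTER_GPU_EXPORT int InternalFlutterGpu_ExampleProc(int value) {
  return value + 1;
}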
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

import 'dart:ffi';
import 'dart:nativewrappers';

/// This is a simple test function.
@Native<Int32 Function()>(symbol: 'InternalFlutterGpuTestProc')
external int testProc();

/// A single parameter callback.
typedef Callback<T> = void Function(T result);

/// This is a test callback that follows the same pattern as much of dart:ui --
/// immediately returning an error string and supplying an asynchronous result
/// via callback later.
@Native<Handle Function(Handle)>(
    symbol: 'InternalFlutterGpuTestProcWithCallback')
external String? testProcWithCallback(Callback<int> callback);

/// This is a test of NativeFieldWrapperClass1, which is commonly used in
/// dart:ui to enable Dart to dictate the lifetime of a C counterpart.
base class FlutterGpuTestClass extends NativeFieldWrapperClass1 {
  /// Default constructor for the test class.
  FlutterGpuTestClass() {
    _constructor();
  }

  /// This "constructor" is used to instantiate and wrap the C counterpart.
  /// This is a common pattern in dart:ui.
  @Native<Void Function(Handle)>(symbol: 'InternalFlutterGpuTestClass_Create')
  external void _constructor();

  /// This is a method that will supply a pointer to the C data counterpart
  /// when calling the function.
  @Native<Void Function(Pointer<Void>, Int)>(
      symbol: 'InternalFlutterGpuTestClass_Method')
  external void coolMethod(int something);
}
engine/lib/gpu/lib/src/smoketest.dart/0
{ "file_path": "engine/lib/gpu/lib/src/smoketest.dart", "repo_id": "engine", "token_count": 430 }
228
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/lib/io/dart_io.h" #include "flutter/fml/logging.h" #include "third_party/dart/runtime/include/bin/dart_io_api.h" #include "third_party/dart/runtime/include/dart_api.h" #include "third_party/tonic/converter/dart_converter.h" #include "third_party/tonic/logging/dart_error.h" using tonic::CheckAndHandleError; using tonic::ToDart; namespace flutter { void DartIO::InitForIsolate(bool may_insecurely_connect_to_all_domains, const std::string& domain_network_policy) { Dart_Handle io_lib = Dart_LookupLibrary(ToDart("dart:io")); Dart_Handle result = Dart_SetNativeResolver(io_lib, dart::bin::LookupIONative, dart::bin::LookupIONativeSymbol); FML_CHECK(!CheckAndHandleError(result)); Dart_Handle ui_lib = Dart_LookupLibrary(ToDart("dart:ui")); Dart_Handle dart_validate_args[1]; dart_validate_args[0] = ToDart(may_insecurely_connect_to_all_domains); Dart_Handle http_connection_hook_closure = Dart_Invoke(ui_lib, ToDart("_getHttpConnectionHookClosure"), /*number_of_arguments=*/1, dart_validate_args); FML_CHECK(!CheckAndHandleError(http_connection_hook_closure)); Dart_Handle http_lib = Dart_LookupLibrary(ToDart("dart:_http")); FML_CHECK(!CheckAndHandleError(http_lib)); Dart_Handle set_http_connection_hook_result = Dart_SetField( http_lib, ToDart("_httpConnectionHook"), http_connection_hook_closure); FML_CHECK(!CheckAndHandleError(set_http_connection_hook_result)); } } // namespace flutter
engine/lib/io/dart_io.cc/0
{ "file_path": "engine/lib/io/dart_io.cc", "repo_id": "engine", "token_count": 674 }
229
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_LIB_UI_COMPOSITING_SCENE_BUILDER_H_ #define FLUTTER_LIB_UI_COMPOSITING_SCENE_BUILDER_H_ #include <cstdint> #include <memory> #include <vector> #include "flutter/flow/layers/container_layer.h" #include "flutter/lib/ui/compositing/scene.h" #include "flutter/lib/ui/dart_wrapper.h" #include "flutter/lib/ui/painting/color_filter.h" #include "flutter/lib/ui/painting/engine_layer.h" #include "flutter/lib/ui/painting/image_filter.h" #include "flutter/lib/ui/painting/path.h" #include "flutter/lib/ui/painting/picture.h" #include "flutter/lib/ui/painting/rrect.h" #include "flutter/lib/ui/painting/shader.h" #include "third_party/tonic/typed_data/typed_list.h" namespace flutter { class SceneBuilder : public RefCountedDartWrappable<SceneBuilder> { DEFINE_WRAPPERTYPEINFO(); FML_FRIEND_MAKE_REF_COUNTED(SceneBuilder); public: static void Create(Dart_Handle wrapper) { UIDartState::ThrowIfUIOperationsProhibited(); auto res = fml::MakeRefCounted<SceneBuilder>(); res->AssociateWithDartWrapper(wrapper); } ~SceneBuilder() override; void pushTransformHandle(Dart_Handle layer_handle, Dart_Handle matrix4_handle, const fml::RefPtr<EngineLayer>& oldLayer) { tonic::Float64List matrix4(matrix4_handle); pushTransform(layer_handle, matrix4, oldLayer); } void pushTransform(Dart_Handle layer_handle, tonic::Float64List& matrix4, const fml::RefPtr<EngineLayer>& oldLayer); void pushOffset(Dart_Handle layer_handle, double dx, double dy, const fml::RefPtr<EngineLayer>& oldLayer); void pushClipRect(Dart_Handle layer_handle, double left, double right, double top, double bottom, int clipBehavior, const fml::RefPtr<EngineLayer>& oldLayer); void pushClipRRect(Dart_Handle layer_handle, const RRect& rrect, int clipBehavior, const fml::RefPtr<EngineLayer>& oldLayer); void pushClipPath(Dart_Handle layer_handle, const CanvasPath* path, int clipBehavior, const fml::RefPtr<EngineLayer>& oldLayer); void pushOpacity(Dart_Handle layer_handle, int alpha, double dx, double dy, const fml::RefPtr<EngineLayer>& oldLayer); void pushColorFilter(Dart_Handle layer_handle, const ColorFilter* color_filter, const fml::RefPtr<EngineLayer>& oldLayer); void pushImageFilter(Dart_Handle layer_handle, const ImageFilter* image_filter, double dx, double dy, const fml::RefPtr<EngineLayer>& oldLayer); void pushBackdropFilter(Dart_Handle layer_handle, ImageFilter* filter, int blendMode, const fml::RefPtr<EngineLayer>& oldLayer); void pushShaderMask(Dart_Handle layer_handle, Shader* shader, double maskRectLeft, double maskRectRight, double maskRectTop, double maskRectBottom, int blendMode, int filterQualityIndex, const fml::RefPtr<EngineLayer>& oldLayer); void addRetained(const fml::RefPtr<EngineLayer>& retainedLayer); void pop(); void addPerformanceOverlay(uint64_t enabledOptions, double left, double right, double top, double bottom); void addPicture(double dx, double dy, Picture* picture, int hints); void addTexture(double dx, double dy, double width, double height, int64_t textureId, bool freeze, int filterQuality); void addPlatformView(double dx, double dy, double width, double height, int64_t viewId); void setRasterizerTracingThreshold(uint32_t frameInterval); void setCheckerboardRasterCacheImages(bool checkerboard); void setCheckerboardOffscreenLayers(bool checkerboard); void build(Dart_Handle scene_handle); const std::vector<std::shared_ptr<ContainerLayer>>& layer_stack() { return 
layer_stack_; } private: SceneBuilder(); void AddLayer(std::shared_ptr<Layer> layer); void PushLayer(std::shared_ptr<ContainerLayer> layer); void PopLayer(); std::vector<std::shared_ptr<ContainerLayer>> layer_stack_; int rasterizer_tracing_threshold_ = 0; bool checkerboard_raster_cache_images_ = false; bool checkerboard_offscreen_layers_ = false; FML_DISALLOW_COPY_AND_ASSIGN(SceneBuilder); }; } // namespace flutter #endif // FLUTTER_LIB_UI_COMPOSITING_SCENE_BUILDER_H_
engine/lib/ui/compositing/scene_builder.h/0
{ "file_path": "engine/lib/ui/compositing/scene_builder.h", "repo_id": "engine", "token_count": 2550 }
230
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. import 'dart:async'; import 'dart:typed_data'; import 'dart:ui'; import 'dart:isolate'; import 'dart:ffi' hide Size; void main() {} /// Mutiple tests use this to signal to the C++ side that they are ready for /// validation. @pragma('vm:external-name', 'Finish') external void _finish(); @pragma('vm:entry-point') void customOnErrorTrue() { PlatformDispatcher.instance.onError = (Object error, StackTrace? stack) { _finish(); return true; }; throw Exception('true'); } @pragma('vm:entry-point') void customOnErrorFalse() { PlatformDispatcher.instance.onError = (Object error, StackTrace? stack) { _finish(); return false; }; throw Exception('false'); } @pragma('vm:entry-point') void customOnErrorThrow() { PlatformDispatcher.instance.onError = (Object error, StackTrace? stack) { _finish(); throw Exception('throw2'); }; throw Exception('throw1'); } @pragma('vm:entry-point') void setLatencyPerformanceMode() { PlatformDispatcher.instance.requestDartPerformanceMode(DartPerformanceMode.latency); _finish(); } @pragma('vm:entry-point') void validateSceneBuilderAndScene() { final SceneBuilder builder = SceneBuilder(); builder.pushOffset(10, 10); _validateBuilderHasLayers(builder); final Scene scene = builder.build(); _validateBuilderHasNoLayers(); _captureScene(scene); scene.dispose(); _validateSceneHasNoLayers(); } @pragma('vm:external-name', 'ValidateBuilderHasLayers') external _validateBuilderHasLayers(SceneBuilder builder); @pragma('vm:external-name', 'ValidateBuilderHasNoLayers') external _validateBuilderHasNoLayers(); @pragma('vm:external-name', 'CaptureScene') external _captureScene(Scene scene); @pragma('vm:external-name', 'ValidateSceneHasNoLayers') external _validateSceneHasNoLayers(); @pragma('vm:entry-point') void validateEngineLayerDispose() { final SceneBuilder builder = SceneBuilder(); final EngineLayer layer = builder.pushOffset(10, 10); _captureRootLayer(builder); final Scene scene = builder.build(); scene.dispose(); _validateLayerTreeCounts(); layer.dispose(); _validateEngineLayerDispose(); } @pragma('vm:external-name', 'CaptureRootLayer') external _captureRootLayer(SceneBuilder sceneBuilder); @pragma('vm:external-name', 'ValidateLayerTreeCounts') external _validateLayerTreeCounts(); @pragma('vm:external-name', 'ValidateEngineLayerDispose') external _validateEngineLayerDispose(); @pragma('vm:entry-point') Future<void> createSingleFrameCodec() async { final ImmutableBuffer buffer = await ImmutableBuffer.fromUint8List(Uint8List.fromList(List<int>.filled(4, 100))); final ImageDescriptor descriptor = ImageDescriptor.raw( buffer, width: 1, height: 1, pixelFormat: PixelFormat.rgba8888, ); final Codec codec = await descriptor.instantiateCodec(); _validateCodec(codec); final FrameInfo info = await codec.getNextFrame(); info.image.dispose(); _validateCodec(codec); codec.dispose(); descriptor.dispose(); buffer.dispose(); assert(buffer.debugDisposed); _finish(); } @pragma('vm:external-name', 'ValidateCodec') external void _validateCodec(Codec codec); @pragma('vm:entry-point') void createVertices() { const int uint16max = 65535; final Int32List colors = Int32List(uint16max); final Float32List coords = Float32List(uint16max * 2); final Uint16List indices = Uint16List(uint16max); final Float32List positions = Float32List(uint16max * 2); colors[0] = const Color(0xFFFF0000).value; colors[1] = const Color(0xFF00FF00).value; colors[2] = const 
Color(0xFF0000FF).value; colors[3] = const Color(0xFF00FFFF).value; indices[1] = indices[3] = 1; indices[2] = indices[5] = 3; indices[4] = 2; positions[2] = positions[4] = positions[5] = positions[7] = 250.0; final Vertices vertices = Vertices.raw( VertexMode.triangles, positions, textureCoordinates: coords, colors: colors, indices: indices, ); _validateVertices(vertices); } @pragma('vm:external-name', 'ValidateVertices') external void _validateVertices(Vertices vertices); @pragma('vm:entry-point') void sendSemanticsUpdate() { final SemanticsUpdateBuilder builder = SemanticsUpdateBuilder(); final String identifier = "identifier"; final String label = "label"; final List<StringAttribute> labelAttributes = <StringAttribute> [ SpellOutStringAttribute(range: TextRange(start: 1, end: 2)), ]; final String value = "value"; final List<StringAttribute> valueAttributes = <StringAttribute> [ SpellOutStringAttribute(range: TextRange(start: 2, end: 3)), ]; final String increasedValue = "increasedValue"; final List<StringAttribute> increasedValueAttributes = <StringAttribute> [ SpellOutStringAttribute(range: TextRange(start: 4, end: 5)), ]; final String decreasedValue = "decreasedValue"; final List<StringAttribute> decreasedValueAttributes = <StringAttribute> [ SpellOutStringAttribute(range: TextRange(start: 5, end: 6)), ]; final String hint = "hint"; final List<StringAttribute> hintAttributes = <StringAttribute> [ LocaleStringAttribute( locale: Locale('en', 'MX'), range: TextRange(start: 0, end: 1), ), ]; String tooltip = "tooltip"; final Float64List transform = Float64List(16); final Int32List childrenInTraversalOrder = Int32List(0); final Int32List childrenInHitTestOrder = Int32List(0); final Int32List additionalActions = Int32List(0); transform[0] = 1; transform[1] = 0; transform[2] = 0; transform[3] = 0; transform[4] = 0; transform[5] = 1; transform[6] = 0; transform[7] = 0; transform[8] = 0; transform[9] = 0; transform[10] = 1; transform[11] = 0; transform[12] = 0; transform[13] = 0; transform[14] = 0; transform[15] = 0; builder.updateNode( id: 0, flags: 0, actions: 0, maxValueLength: 0, currentValueLength: 0, textSelectionBase: -1, textSelectionExtent: -1, platformViewId: -1, scrollChildren: 0, scrollIndex: 0, scrollPosition: 0, scrollExtentMax: 0, scrollExtentMin: 0, rect: Rect.fromLTRB(0, 0, 10, 10), elevation: 0, thickness: 0, identifier: identifier, label: label, labelAttributes: labelAttributes, value: value, valueAttributes: valueAttributes, increasedValue: increasedValue, increasedValueAttributes: increasedValueAttributes, decreasedValue: decreasedValue, decreasedValueAttributes: decreasedValueAttributes, hint: hint, hintAttributes: hintAttributes, tooltip: tooltip, textDirection: TextDirection.ltr, transform: transform, childrenInTraversalOrder: childrenInTraversalOrder, childrenInHitTestOrder: childrenInHitTestOrder, additionalActions: additionalActions); _semanticsUpdate(builder.build()); } @pragma('vm:external-name', 'SemanticsUpdate') external void _semanticsUpdate(SemanticsUpdate update); @pragma('vm:entry-point') void createPath() { final Path path = Path()..lineTo(10, 10); _validatePath(path); // Arbitrarily hold a reference to the path to make sure it does not get // garbage collected. Future<void>.delayed(const Duration(days: 100)).then((_) { path.lineTo(100, 100); }); } @pragma('vm:external-name', 'ValidatePath') external void _validatePath(Path path); @pragma('vm:entry-point') void frameCallback(Object? 
image, int durationMilliseconds, String decodeError) { validateFrameCallback(image, durationMilliseconds, decodeError); } @pragma('vm:external-name', 'ValidateFrameCallback') external void validateFrameCallback(Object? image, int durationMilliseconds, String decodeError); @pragma('vm:entry-point') void platformMessagePortResponseTest() async { ReceivePort receivePort = ReceivePort(); _callPlatformMessageResponseDartPort(receivePort.sendPort.nativePort); List<dynamic> resultList = await receivePort.first; int identifier = resultList[0] as int; Uint8List? bytes = resultList[1] as Uint8List?; ByteData result = ByteData.sublistView(bytes!); if (result.lengthInBytes == 100) { _finishCallResponse(true); } else { _finishCallResponse(false); } } @pragma('vm:entry-point') void platformMessageResponseTest() { _callPlatformMessageResponseDart((ByteData? result) { if (result is ByteData && result.lengthInBytes == 100) { int value = result.getInt8(0); bool didThrowOnModify = false; try { result.setInt8(0, value); } catch (e) { didThrowOnModify = true; } // This should be a read only buffer. _finishCallResponse(didThrowOnModify); } else { _finishCallResponse(false); } }); } @pragma('vm:external-name', 'CallPlatformMessageResponseDartPort') external void _callPlatformMessageResponseDartPort(int port); @pragma('vm:external-name', 'CallPlatformMessageResponseDart') external void _callPlatformMessageResponseDart(void Function(ByteData? result) callback); @pragma('vm:external-name', 'FinishCallResponse') external void _finishCallResponse(bool didPass); @pragma('vm:entry-point') void messageCallback(dynamic data) {} @pragma('vm:entry-point') @pragma('vm:external-name', 'ValidateConfiguration') external void validateConfiguration(); // Draw a circle on a Canvas that has a PictureRecorder. Take the image from // the PictureRecorder, and encode it as png. Check that the png data is // backed by an external Uint8List. @pragma('vm:entry-point') Future<void> encodeImageProducesExternalUint8List() async { final PictureRecorder pictureRecorder = PictureRecorder(); final Canvas canvas = Canvas(pictureRecorder); final Paint paint = Paint() ..color = Color.fromRGBO(255, 255, 255, 1.0) ..style = PaintingStyle.fill; final Offset c = Offset(50.0, 50.0); canvas.drawCircle(c, 25.0, paint); final Picture picture = pictureRecorder.endRecording(); final Image image = await picture.toImage(100, 100); _encodeImage(image, ImageByteFormat.png.index, (Uint8List result, String? error) { // The buffer should be non-null and writable. result[0] = 0; // The buffer should be external typed data. _validateExternal(result); }); } @pragma('vm:external-name', 'EncodeImage') external void _encodeImage(Image i, int format, void Function(Uint8List result, String? error)); @pragma('vm:external-name', 'ValidateExternal') external void _validateExternal(Uint8List result); @pragma('vm:external-name', 'ValidateError') external void _validateError(String? error); @pragma('vm:external-name', 'TurnOffGPU') external void _turnOffGPU(bool value); @pragma('vm:external-name', 'FlushGpuAwaitingTasks') external void _flushGpuAwaitingTasks(); @pragma('vm:external-name', 'ValidateNotNull') external void _validateNotNull(Object? 
object); @pragma('vm:entry-point') Future<void> toByteDataWithoutGPU() async { final PictureRecorder pictureRecorder = PictureRecorder(); final Canvas canvas = Canvas(pictureRecorder); final Paint paint = Paint() ..color = Color.fromRGBO(255, 255, 255, 1.0) ..style = PaintingStyle.fill; final Offset c = Offset(50.0, 50.0); canvas.drawCircle(c, 25.0, paint); final Picture picture = pictureRecorder.endRecording(); final Image image = await picture.toImage(100, 100); _turnOffGPU(true); Timer flusher = Timer.periodic(Duration(milliseconds: 1), (timer) { _flushGpuAwaitingTasks(); }); try { ByteData? byteData = await image.toByteData(); _validateError(null); } catch (error) { _validateError(error.toString()); } finally { flusher.cancel(); } } @pragma('vm:entry-point') Future<void> toByteDataRetries() async { final PictureRecorder pictureRecorder = PictureRecorder(); final Canvas canvas = Canvas(pictureRecorder); final Paint paint = Paint() ..color = Color.fromRGBO(255, 255, 255, 1.0) ..style = PaintingStyle.fill; final Offset c = Offset(50.0, 50.0); canvas.drawCircle(c, 25.0, paint); final Picture picture = pictureRecorder.endRecording(); final Image image = await picture.toImage(100, 100); _turnOffGPU(true); Future<void>.delayed(Duration(milliseconds: 10), () { _turnOffGPU(false); }); try { ByteData? byteData = await image.toByteData(); _validateNotNull(byteData); } catch (error) { _validateNotNull(null); } } @pragma('vm:entry-point') Future<void> pumpImage() async { const int width = 60; const int height = 60; final Completer<Image> completer = Completer<Image>(); decodeImageFromPixels( Uint8List.fromList(List<int>.filled(width * height * 4, 0xFF)), width, height, PixelFormat.rgba8888, (Image image) => completer.complete(image), ); final Image image = await completer.future; late Picture picture; late OffsetEngineLayer layer; void renderBlank(Duration duration) { image.dispose(); picture.dispose(); layer.dispose(); final PictureRecorder recorder = PictureRecorder(); final Canvas canvas = Canvas(recorder); canvas.drawPaint(Paint()); picture = recorder.endRecording(); final SceneBuilder builder = SceneBuilder(); layer = builder.pushOffset(0, 0); builder.addPicture(Offset.zero, picture); final Scene scene = builder.build(); window.render(scene); scene.dispose(); _finish(); } void renderImage(Duration duration) { final PictureRecorder recorder = PictureRecorder(); final Canvas canvas = Canvas(recorder); canvas.drawImage(image, Offset.zero, Paint()); picture = recorder.endRecording(); final SceneBuilder builder = SceneBuilder(); layer = builder.pushOffset(0, 0); builder.addPicture(Offset.zero, picture); _captureImageAndPicture(image, picture); final Scene scene = builder.build(); window.render(scene); scene.dispose(); window.onBeginFrame = renderBlank; window.scheduleFrame(); } window.onBeginFrame = renderImage; window.scheduleFrame(); } @pragma('vm:external-name', 'CaptureImageAndPicture') external void _captureImageAndPicture(Image image, Picture picture); @pragma('vm:entry-point') void convertPaintToDlPaint() { Paint paint = Paint(); paint.blendMode = BlendMode.modulate; paint.color = Color.fromARGB(0x11, 0x22, 0x33, 0x44); paint.colorFilter = ColorFilter.mode(Color.fromARGB(0x55, 0x66, 0x77, 0x88), BlendMode.xor); paint.maskFilter = MaskFilter.blur(BlurStyle.inner, .75); paint.style = PaintingStyle.stroke; _convertPaintToDlPaint(paint); } @pragma('vm:external-name', 'ConvertPaintToDlPaint') external void _convertPaintToDlPaint(Paint paint); @pragma('vm:entry-point') void hooksTests() async { 
Future<void> test(String name, FutureOr<void> Function() testFunction) async { try { await testFunction(); } catch (e) { print('Test "$name" failed!'); rethrow; } } void expectEquals(Object? value, Object? expected) { if (value != expected) { throw 'Expected $value to be $expected.'; } } void expectIdentical(Object a, Object b) { if (!identical(a, b)) { throw 'Expected $a to be identical to $b.'; } } void expectNotEquals(Object? value, Object? expected) { if (value == expected) { throw 'Expected $value to not be $expected.'; } } await test('onMetricsChanged preserves callback zone', () { late Zone originalZone; late Zone callbackZone; late double devicePixelRatio; runZoned(() { originalZone = Zone.current; window.onMetricsChanged = () { callbackZone = Zone.current; devicePixelRatio = window.devicePixelRatio; }; }); window.onMetricsChanged!(); _callHook( '_updateWindowMetrics', 21, 0, // window Id 0.1234, // device pixel ratio 0.0, // width 0.0, // height 0.0, // padding top 0.0, // padding right 0.0, // padding bottom 0.0, // padding left 0.0, // inset top 0.0, // inset right 0.0, // inset bottom 0.0, // inset left 0.0, // system gesture inset top 0.0, // system gesture inset right 0.0, // system gesture inset bottom 0.0, // system gesture inset left 22.0, // physicalTouchSlop <double>[], // display features bounds <int>[], // display features types <int>[], // display features states 0, // Display ID ); expectIdentical(originalZone, callbackZone); if (devicePixelRatio != 0.1234) { throw 'Expected devicePixelRatio to be 0.1234 but got $devicePixelRatio.'; } }); await test('onError preserves the callback zone', () { late Zone originalZone; late Zone callbackZone; final Object error = Exception('foo'); StackTrace? stackTrace; runZoned(() { originalZone = Zone.current; PlatformDispatcher.instance.onError = (Object exception, StackTrace? stackTrace) { callbackZone = Zone.current; expectIdentical(exception, error); expectNotEquals(stackTrace, null); return true; }; }); _callHook('_onError', 2, error, StackTrace.current); PlatformDispatcher.instance.onError = null; expectIdentical(originalZone, callbackZone); }); await test('updateUserSettings can handle an empty object', () { _callHook('_updateUserSettingsData', 1, '{}'); }); await test('PlatformDispatcher.locale returns unknown locale when locales is set to empty list', () { late Locale locale; int callCount = 0; runZoned(() { window.onLocaleChanged = () { locale = PlatformDispatcher.instance.locale; callCount += 1; }; }); const Locale fakeLocale = Locale.fromSubtags(languageCode: '1', countryCode: '2', scriptCode: '3'); _callHook('_updateLocales', 1, <String>[fakeLocale.languageCode, fakeLocale.countryCode!, fakeLocale.scriptCode!, '']); if (callCount != 1) { throw 'Expected 1 call, have $callCount'; } if (locale != fakeLocale) { throw 'Expected $locale to match $fakeLocale'; } _callHook('_updateLocales', 1, <String>[]); if (callCount != 2) { throw 'Expected 2 calls, have $callCount'; } if (locale != const Locale.fromSubtags()) { throw '$locale did not equal ${Locale.fromSubtags()}'; } if (locale.languageCode != 'und') { throw '${locale.languageCode} did not equal "und"'; } }); await test('deprecated region equals', () { // These are equal because ZR is deprecated and was mapped to CD. 
const Locale x = Locale('en', 'ZR'); const Locale y = Locale('en', 'CD'); expectEquals(x, y); expectEquals(x.countryCode, y.countryCode); }); await test('PlatformDispatcher.view getter returns view with provided ID', () { const int viewId = 0; expectEquals(PlatformDispatcher.instance.view(id: viewId)?.viewId, viewId); }); await test('View padding/insets/viewPadding/systemGestureInsets', () { _callHook( '_updateWindowMetrics', 21, 0, // window Id 1.0, // devicePixelRatio 800.0, // width 600.0, // height 50.0, // paddingTop 0.0, // paddingRight 40.0, // paddingBottom 0.0, // paddingLeft 0.0, // insetTop 0.0, // insetRight 0.0, // insetBottom 0.0, // insetLeft 0.0, // systemGestureInsetTop 0.0, // systemGestureInsetRight 0.0, // systemGestureInsetBottom 0.0, // systemGestureInsetLeft 22.0, // physicalTouchSlop <double>[], // display features bounds <int>[], // display features types <int>[], // display features states 0, // Display ID ); expectEquals(window.viewInsets.bottom, 0.0); expectEquals(window.viewPadding.bottom, 40.0); expectEquals(window.padding.bottom, 40.0); expectEquals(window.systemGestureInsets.bottom, 0.0); _callHook( '_updateWindowMetrics', 21, 0, // window Id 1.0, // devicePixelRatio 800.0, // width 600.0, // height 50.0, // paddingTop 0.0, // paddingRight 40.0, // paddingBottom 0.0, // paddingLeft 0.0, // insetTop 0.0, // insetRight 400.0, // insetBottom 0.0, // insetLeft 0.0, // systemGestureInsetTop 0.0, // systemGestureInsetRight 44.0, // systemGestureInsetBottom 0.0, // systemGestureInsetLeft 22.0, // physicalTouchSlop <double>[], // display features bounds <int>[], // display features types <int>[], // display features states 0, // Display ID ); expectEquals(window.viewInsets.bottom, 400.0); expectEquals(window.viewPadding.bottom, 40.0); expectEquals(window.padding.bottom, 0.0); expectEquals(window.systemGestureInsets.bottom, 44.0); }); await test('Window physical touch slop', () { _callHook( '_updateWindowMetrics', 21, 0, // window Id 1.0, // devicePixelRatio 800.0, // width 600.0, // height 50.0, // paddingTop 0.0, // paddingRight 40.0, // paddingBottom 0.0, // paddingLeft 0.0, // insetTop 0.0, // insetRight 0.0, // insetBottom 0.0, // insetLeft 0.0, // systemGestureInsetTop 0.0, // systemGestureInsetRight 0.0, // systemGestureInsetBottom 0.0, // systemGestureInsetLeft 11.0, // physicalTouchSlop <double>[], // display features bounds <int>[], // display features types <int>[], // display features states 0, // Display ID ); expectEquals(window.gestureSettings, GestureSettings(physicalTouchSlop: 11.0)); _callHook( '_updateWindowMetrics', 21, 0, // window Id 1.0, // devicePixelRatio 800.0, // width 600.0, // height 50.0, // paddingTop 0.0, // paddingRight 40.0, // paddingBottom 0.0, // paddingLeft 0.0, // insetTop 0.0, // insetRight 400.0, // insetBottom 0.0, // insetLeft 0.0, // systemGestureInsetTop 0.0, // systemGestureInsetRight 44.0, // systemGestureInsetBottom 0.0, // systemGestureInsetLeft -1.0, // physicalTouchSlop <double>[], // display features bounds <int>[], // display features types <int>[], // display features states 0, // Display ID ); expectEquals(window.gestureSettings, GestureSettings(physicalTouchSlop: null)); _callHook( '_updateWindowMetrics', 21, 0, // window Id 1.0, // devicePixelRatio 800.0, // width 600.0, // height 50.0, // paddingTop 0.0, // paddingRight 40.0, // paddingBottom 0.0, // paddingLeft 0.0, // insetTop 0.0, // insetRight 400.0, // insetBottom 0.0, // insetLeft 0.0, // systemGestureInsetTop 0.0, // systemGestureInsetRight 44.0, // 
systemGestureInsetBottom 0.0, // systemGestureInsetLeft 22.0, // physicalTouchSlop <double>[], // display features bounds <int>[], // display features types <int>[], // display features states 0, // Display ID ); expectEquals(window.gestureSettings, GestureSettings(physicalTouchSlop: 22.0)); }); await test('onLocaleChanged preserves callback zone', () { late Zone innerZone; late Zone runZone; Locale? locale; runZoned(() { innerZone = Zone.current; window.onLocaleChanged = () { runZone = Zone.current; locale = window.locale; }; }); _callHook('_updateLocales', 1, <String>['en', 'US', '', '']); expectIdentical(runZone, innerZone); expectEquals(locale, const Locale('en', 'US')); }); await test('onBeginFrame preserves callback zone', () { late Zone innerZone; late Zone runZone; late Duration start; runZoned(() { innerZone = Zone.current; window.onBeginFrame = (Duration value) { runZone = Zone.current; start = value; }; }); _callHook('_beginFrame', 2, 1234, 1); expectIdentical(runZone, innerZone); expectEquals(start, const Duration(microseconds: 1234)); }); await test('onDrawFrame preserves callback zone', () { late Zone innerZone; late Zone runZone; runZoned(() { innerZone = Zone.current; window.onDrawFrame = () { runZone = Zone.current; }; }); _callHook('_drawFrame'); expectIdentical(runZone, innerZone); }); await test('onReportTimings preserves callback zone', () { late Zone innerZone; late Zone runZone; runZoned(() { innerZone = Zone.current; window.onReportTimings = (List<FrameTiming> timings) { runZone = Zone.current; }; }); _callHook('_reportTimings', 1, <int>[]); expectIdentical(runZone, innerZone); }); await test('onPointerDataPacket preserves callback zone', () { late Zone innerZone; late Zone runZone; late PointerDataPacket data; runZoned(() { innerZone = Zone.current; window.onPointerDataPacket = (PointerDataPacket value) { runZone = Zone.current; data = value; }; }); final ByteData testData = ByteData.view(Uint8List(0).buffer); _callHook('_dispatchPointerDataPacket', 1, testData); expectIdentical(runZone, innerZone); expectEquals(data.data.length, 0); }); await test('onSemanticsEnabledChanged preserves callback zone', () { late Zone innerZone; late Zone runZone; late bool enabled; runZoned(() { innerZone = Zone.current; window.onSemanticsEnabledChanged = () { runZone = Zone.current; enabled = window.semanticsEnabled; }; }); final bool newValue = !window.semanticsEnabled; // needed? 
_callHook('_updateSemanticsEnabled', 1, newValue); expectIdentical(runZone, innerZone); expectEquals(enabled, newValue); }); await test('onSemanticsActionEvent preserves callback zone', () { late Zone innerZone; late Zone runZone; late SemanticsActionEvent action; runZoned(() { innerZone = Zone.current; PlatformDispatcher.instance.onSemanticsActionEvent = (SemanticsActionEvent actionEvent) { runZone = Zone.current; action = actionEvent; }; }); _callHook('_dispatchSemanticsAction', 3, 1234, 4, null); expectIdentical(runZone, innerZone); expectEquals(action.nodeId, 1234); expectEquals(action.type.index, 4); }); await test('onPlatformMessage preserves callback zone', () { late Zone innerZone; late Zone runZone; late String name; runZoned(() { innerZone = Zone.current; window.onPlatformMessage = (String value, _, __) { runZone = Zone.current; name = value; }; }); _callHook('_dispatchPlatformMessage', 3, 'testName', null, 123456789); expectIdentical(runZone, innerZone); expectEquals(name, 'testName'); }); await test('onTextScaleFactorChanged preserves callback zone', () { late Zone innerZone; late Zone runZoneTextScaleFactor; late Zone runZonePlatformBrightness; late double? textScaleFactor; late Brightness? platformBrightness; runZoned(() { innerZone = Zone.current; window.onTextScaleFactorChanged = () { runZoneTextScaleFactor = Zone.current; textScaleFactor = window.textScaleFactor; }; window.onPlatformBrightnessChanged = () { runZonePlatformBrightness = Zone.current; platformBrightness = window.platformBrightness; }; }); window.onTextScaleFactorChanged!(); _callHook('_updateUserSettingsData', 1, '{"textScaleFactor": 0.5, "platformBrightness": "light", "alwaysUse24HourFormat": true}'); expectIdentical(runZoneTextScaleFactor, innerZone); expectEquals(textScaleFactor, 0.5); textScaleFactor = null; platformBrightness = null; window.onPlatformBrightnessChanged!(); _callHook('_updateUserSettingsData', 1, '{"textScaleFactor": 0.5, "platformBrightness": "dark", "alwaysUse24HourFormat": true}'); expectIdentical(runZonePlatformBrightness, innerZone); expectEquals(platformBrightness, Brightness.dark); }); await test('onFrameDataChanged preserves callback zone', () { late Zone innerZone; late Zone runZone; late int frameNumber; runZoned(() { innerZone = Zone.current; window.onFrameDataChanged = () { runZone = Zone.current; frameNumber = window.frameData.frameNumber; }; }); _callHook('_beginFrame', 2, 0, 2); expectNotEquals(runZone, null); expectIdentical(runZone, innerZone); expectEquals(frameNumber, 2); }); await test('_updateDisplays preserves callback zone', () { late Zone innerZone; late Zone runZone; late Display display; runZoned(() { innerZone = Zone.current; window.onMetricsChanged = () { runZone = Zone.current; display = PlatformDispatcher.instance.displays.first; }; }); _callHook('_updateDisplays', 5, <int>[0], <double>[800], <double>[600], <double>[1.5], <double>[65]); expectNotEquals(runZone, null); expectIdentical(runZone, innerZone); expectEquals(display.id, 0); expectEquals(display.size, const Size(800, 600)); expectEquals(display.devicePixelRatio, 1.5); expectEquals(display.refreshRate, 65); }); await test('_futureize handles callbacker sync error', () async { String? callbacker(void Function(Object? arg) cb) { return 'failure'; } Object? error; try { await _futurize(callbacker); } catch (err) { error = err; } expectNotEquals(error, null); }); await test('_futureize does not leak sync uncaught exceptions into the zone', () async { String? callbacker(void Function(Object? 
arg) cb) { cb(null); // indicates failure } Object? error; try { await _futurize(callbacker); } catch (err) { error = err; } expectNotEquals(error, null); }); await test('_futureize does not leak async uncaught exceptions into the zone', () async { String? callbacker(void Function(Object? arg) cb) { Timer.run(() { cb(null); // indicates failure }); } Object? error; try { await _futurize(callbacker); } catch (err) { error = err; } expectNotEquals(error, null); }); await test('_futureize successfully returns a value sync', () async { String? callbacker(void Function(Object? arg) cb) { cb(true); } final Object? result = await _futurize(callbacker); expectEquals(result, true); }); await test('_futureize successfully returns a value async', () async { String? callbacker(void Function(Object? arg) cb) { Timer.run(() { cb(true); }); } final Object? result = await _futurize(callbacker); expectEquals(result, true); }); await test('root isolate token', () async { if (RootIsolateToken.instance == null) { throw Exception('We should have a token on a root isolate.'); } ReceivePort receivePort = ReceivePort(); Isolate.spawn(_backgroundRootIsolateTestMain, receivePort.sendPort); bool didPass = await receivePort.first as bool; if (!didPass) { throw Exception('Background isolate found a root isolate id.'); } }); await test('send port message without registering', () async { ReceivePort receivePort = ReceivePort(); Isolate.spawn(_backgroundIsolateSendWithoutRegistering, receivePort.sendPort); bool didError = await receivePort.first as bool; if (!didError) { throw Exception('Expected an error when not registering a root isolate and sending port messages.'); } }); _finish(); } /// Sends `true` on [port] if the isolate executing the function is not a root /// isolate. void _backgroundRootIsolateTestMain(SendPort port) { port.send(RootIsolateToken.instance == null); } /// Sends `true` on [port] if [PlatformDispatcher.sendPortPlatformMessage] /// throws an exception without calling /// [PlatformDispatcher.registerBackgroundIsolate]. void _backgroundIsolateSendWithoutRegistering(SendPort port) { bool didError = false; ReceivePort messagePort = ReceivePort(); try { PlatformDispatcher.instance.sendPortPlatformMessage( 'foo', null, 1, messagePort.sendPort, ); } catch (_) { didError = true; } port.send(didError); } typedef _Callback<T> = void Function(T result); typedef _Callbacker<T> = String? Function(_Callback<T?> callback); // This is an exact copy of the function defined in painting.dart. If you change either // then you must change both. Future<T> _futurize<T>(_Callbacker<T> callbacker) { final Completer<T> completer = Completer<T>.sync(); // If the callback synchronously throws an error, then synchronously // rethrow that error instead of adding it to the completer. This // prevents the Zone from receiving an uncaught exception. bool sync = true; final String? error = callbacker((T? t) { if (t == null) { if (sync) { throw Exception('operation failed'); } else { completer.completeError(Exception('operation failed')); } } else { completer.complete(t); } }); sync = false; if (error != null) throw Exception(error); return completer.future; } @pragma('vm:external-name', 'CallHook') external void _callHook( String name, [ int argCount = 0, Object? arg0, Object? arg1, Object? arg2, Object? arg3, Object? arg4, Object? arg5, Object? arg6, Object? arg8, Object? arg9, Object? arg10, Object? arg11, Object? arg12, Object? arg13, Object? arg14, Object? arg15, Object? arg16, Object? arg17, Object? arg18, Object? 
arg19, Object? arg20, Object? arg21, ]);
engine/lib/ui/fixtures/ui_test.dart/0
{ "file_path": "engine/lib/ui/fixtures/ui_test.dart", "repo_id": "engine", "token_count": 12628 }
231
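The _futurize helper in the fixture above (an exact copy of the one in painting.dart) adapts the engine's callback-style entry points into Futures: a non-null String return or a null callback argument signals failure, and the sync flag makes a synchronous failure rethrow immediately so the current Zone never observes an uncaught asynchronous error, which is what the "_futureize" tests assert. Below is a minimal, self-contained Dart sketch of the same pattern; the loadAsset callbacker is hypothetical and only stands in for a native entry point.

import 'dart:async';

typedef Callbacker<T> = String? Function(void Function(T? result) callback);

Future<T> futurize<T>(Callbacker<T> callbacker) {
  final Completer<T> completer = Completer<T>.sync();
  bool sync = true;
  final String? error = callbacker((T? t) {
    if (t == null) {
      if (sync) {
        // A synchronous failure is rethrown here instead of being added to
        // the completer, so the Zone never sees an uncaught async error.
        throw Exception('operation failed');
      } else {
        completer.completeError(Exception('operation failed'));
      }
    } else {
      completer.complete(t);
    }
  });
  sync = false;
  if (error != null) {
    throw Exception(error);
  }
  return completer.future;
}

// Hypothetical callbacker standing in for a native entry point: it reports
// success asynchronously and returns null (no synchronous error).
String? loadAsset(void Function(String? result) callback) {
  Timer.run(() => callback('payload'));
  return null;
}

Future<void> main() async {
  final String value = await futurize<String>(loadAsset);
  print(value); // Prints "payload".
}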
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. part of dart.ui; // ignore_for_file: avoid_classes_with_only_static_members /// Helper functions for Dart Plugin Registrants. abstract final class DartPluginRegistrant { static bool _wasInitialized = false; /// Makes sure the that the Dart Plugin Registrant has been called for this /// isolate. This can safely be executed multiple times on the same isolate, /// but should not be called on the Root isolate. static void ensureInitialized() { if (!_wasInitialized) { _wasInitialized = true; _ensureInitialized(); } } @Native<Void Function()>(symbol: 'DartPluginRegistrant_EnsureInitialized') external static void _ensureInitialized(); } // Corelib 'print' implementation. void _print(String arg) { _Logger._printString(arg); } void _printDebug(String arg) { _Logger._printDebugString(arg); } class _Logger { @Native<Void Function(Handle)>(symbol: 'DartRuntimeHooks::Logger_PrintString') external static void _printString(String? s); @Native<Void Function(Handle)>(symbol: 'DartRuntimeHooks::Logger_PrintDebugString') external static void _printDebugString(String? s); } // If we actually run on big endian machines, we'll need to do something smarter // here. We don't use [Endian.Host] because it's not a compile-time // constant and can't propagate into the set/get calls. const Endian _kFakeHostEndian = Endian.little; // A service protocol extension to schedule a frame to be rendered into the // window. Future<developer.ServiceExtensionResponse> _scheduleFrame( String method, Map<String, String> parameters, ) async { // Schedule the frame. PlatformDispatcher.instance.scheduleFrame(); // Always succeed. return developer.ServiceExtensionResponse.result(json.encode(<String, String>{ 'type': 'Success', })); } Future<developer.ServiceExtensionResponse> _reinitializeShader( String method, Map<String, String> parameters, ) async { final String? assetKey = parameters['assetKey']; if (assetKey != null) { FragmentProgram._reinitializeShader(assetKey); } // Always succeed. return developer.ServiceExtensionResponse.result(json.encode(<String, String>{ 'type': 'Success', })); } Future<developer.ServiceExtensionResponse> _getImpellerEnabled( String method, Map<String, String> parameters, ) async { return developer.ServiceExtensionResponse.result(json.encode(<String, Object>{ 'type': 'Success', 'enabled': _impellerEnabled, })); } const bool _kReleaseMode = bool.fromEnvironment('dart.vm.product'); @Native<Void Function(Handle)>(symbol: 'DartRuntimeHooks::ScheduleMicrotask') external void _scheduleMicrotask(void Function() callback); @Native<Handle Function(Handle)>(symbol: 'DartRuntimeHooks::GetCallbackHandle') external int? _getCallbackHandle(Function closure); @Native<Handle Function(Int64)>(symbol: 'DartRuntimeHooks::GetCallbackFromHandle') external Function? _getCallbackFromHandle(int handle); typedef _PrintClosure = void Function(String line); // Used by the embedder to initialize how printing is performed. // See also https://github.com/dart-lang/sdk/blob/main/sdk/lib/_internal/vm/lib/print_patch.dart @pragma('vm:entry-point') _PrintClosure _getPrintClosure() => _print; typedef _ScheduleImmediateClosure = void Function(void Function()); // Used by the embedder to initialize how microtasks are scheduled. 
// See also https://github.com/dart-lang/sdk/blob/main/sdk/lib/_internal/vm/lib/schedule_microtask_patch.dart
@pragma('vm:entry-point')
_ScheduleImmediateClosure _getScheduleMicrotaskClosure() => _scheduleMicrotask;

// Used internally to indicate whether the Engine is using Impeller for
// rendering.
@pragma('vm:entry-point')
bool _impellerEnabled = false;

// Used internally to indicate whether the embedder enables the implicit view,
// and the implicit view's ID if so.
//
// The exact value of this variable is an implementation detail that may change
// at any time. Apps should always use PlatformDispatcher.implicitView to
// determine the current implicit view, if any.
@pragma('vm:entry-point')
int? _implicitViewId;
engine/lib/ui/natives.dart/0
{ "file_path": "engine/lib/ui/natives.dart", "repo_id": "engine", "token_count": 1281 }
232
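The handlers above (_scheduleFrame, _reinitializeShader, _getImpellerEnabled) follow the dart:developer service-extension shape: each takes the method name plus a string parameter map and answers with a JSON-encoded ServiceExtensionResponse. The call that registers them with the VM service is outside this excerpt; the sketch below only illustrates the general registration pattern, and the extension name is hypothetical.

import 'dart:convert';
import 'dart:developer' as developer;

// Hypothetical extension that echoes a parameter back; the handler signature
// matches the _scheduleFrame / _reinitializeShader handlers above.
Future<developer.ServiceExtensionResponse> _echo(
  String method,
  Map<String, String> parameters,
) async {
  return developer.ServiceExtensionResponse.result(json.encode(<String, String>{
    'type': 'Success',
    'echo': parameters['message'] ?? '',
  }));
}

void main() {
  // Tooling can then invoke ext.example.echo over the VM service protocol.
  developer.registerExtension('ext.example.echo', _echo);
}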
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <memory> #include <sstream> #include "display_list/effects/dl_runtime_effect.h" #include "flutter/lib/ui/painting/fragment_program.h" #include "flutter/assets/asset_manager.h" #include "flutter/fml/trace_event.h" #include "flutter/impeller/runtime_stage/runtime_stage.h" #include "flutter/lib/ui/dart_wrapper.h" #include "flutter/lib/ui/ui_dart_state.h" #include "flutter/lib/ui/window/platform_configuration.h" #include "impeller/core/runtime_types.h" #include "third_party/skia/include/core/SkString.h" #include "third_party/tonic/converter/dart_converter.h" #include "third_party/tonic/dart_args.h" #include "third_party/tonic/dart_binding_macros.h" #include "third_party/tonic/dart_library_natives.h" #include "third_party/tonic/typed_data/typed_list.h" namespace flutter { IMPLEMENT_WRAPPERTYPEINFO(ui, FragmentProgram); static std::string RuntimeStageBackendToString( impeller::RuntimeStageBackend backend) { switch (backend) { case impeller::RuntimeStageBackend::kSkSL: return "SkSL"; case impeller::RuntimeStageBackend::kMetal: return "Metal"; case impeller::RuntimeStageBackend::kOpenGLES: return "OpenGLES"; case impeller::RuntimeStageBackend::kVulkan: return "Vulkan"; } } std::string FragmentProgram::initFromAsset(const std::string& asset_name) { FML_TRACE_EVENT("flutter", "FragmentProgram::initFromAsset", "asset", asset_name); std::shared_ptr<AssetManager> asset_manager = UIDartState::Current() ->platform_configuration() ->client() ->GetAssetManager(); std::unique_ptr<fml::Mapping> data = asset_manager->GetAsMapping(asset_name); if (data == nullptr) { return std::string("Asset '") + asset_name + std::string("' not found"); } auto runtime_stages = impeller::RuntimeStage::DecodeRuntimeStages(std::move(data)); if (runtime_stages.empty()) { return std::string("Asset '") + asset_name + std::string("' does not contain any shader data."); } auto backend = UIDartState::Current()->GetRuntimeStageBackend(); auto runtime_stage = runtime_stages[backend]; if (!runtime_stage) { std::ostringstream stream; stream << "Asset '" << asset_name << "' does not contain appropriate runtime stage data for current " "backend (" << RuntimeStageBackendToString(backend) << ")." << std::endl << "Found stages: "; for (const auto& kvp : runtime_stages) { if (kvp.second) { stream << RuntimeStageBackendToString(kvp.first) << " "; } } return stream.str(); } int sampled_image_count = 0; size_t other_uniforms_bytes = 0; for (const auto& uniform_description : runtime_stage->GetUniforms()) { if (uniform_description.type == impeller::RuntimeUniformType::kSampledImage) { sampled_image_count++; } else { other_uniforms_bytes += uniform_description.GetSize(); } } if (UIDartState::Current()->IsImpellerEnabled()) { runtime_effect_ = DlRuntimeEffect::MakeImpeller(std::move(runtime_stage)); } else { const auto& code_mapping = runtime_stage->GetCodeMapping(); auto code_size = code_mapping->GetSize(); const char* sksl = reinterpret_cast<const char*>(code_mapping->GetMapping()); // SkString makes a copy. 
SkRuntimeEffect::Result result = SkRuntimeEffect::MakeForShader(SkString(sksl, code_size)); if (result.effect == nullptr) { return std::string("Invalid SkSL:\n") + sksl + std::string("\nSkSL Error:\n") + result.errorText.c_str(); } runtime_effect_ = DlRuntimeEffect::MakeSkia(result.effect); } Dart_Handle ths = Dart_HandleFromWeakPersistent(dart_wrapper()); if (Dart_IsError(ths)) { Dart_PropagateError(ths); } Dart_Handle result = Dart_SetField(ths, tonic::ToDart("_samplerCount"), Dart_NewInteger(sampled_image_count)); if (Dart_IsError(result)) { return "Failed to set sampler count for fragment program."; } size_t rounded_uniform_bytes = (other_uniforms_bytes + sizeof(float) - 1) & ~(sizeof(float) - 1); size_t float_count = rounded_uniform_bytes / sizeof(float); result = Dart_SetField(ths, tonic::ToDart("_uniformFloatCount"), Dart_NewInteger(float_count)); if (Dart_IsError(result)) { return "Failed to set uniform float count for fragment program."; } return ""; } std::shared_ptr<DlColorSource> FragmentProgram::MakeDlColorSource( std::shared_ptr<std::vector<uint8_t>> float_uniforms, const std::vector<std::shared_ptr<DlColorSource>>& children) { return DlColorSource::MakeRuntimeEffect(runtime_effect_, children, std::move(float_uniforms)); } void FragmentProgram::Create(Dart_Handle wrapper) { auto res = fml::MakeRefCounted<FragmentProgram>(); res->AssociateWithDartWrapper(wrapper); } FragmentProgram::FragmentProgram() = default; FragmentProgram::~FragmentProgram() = default; } // namespace flutter
engine/lib/ui/painting/fragment_program.cc/0
{ "file_path": "engine/lib/ui/painting/fragment_program.cc", "repo_id": "engine", "token_count": 2141 }
233
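On the framework side, the _samplerCount and _uniformFloatCount fields that FragmentProgram::initFromAsset writes back (the float count being the non-sampler uniform bytes rounded up to whole 32-bit floats) determine how many float and sampler slots a shader exposes. A minimal dart:ui usage sketch follows; the asset name and uniform layout are made up for illustration and assume a runtime-stage asset with at least one float uniform and one sampler.

import 'dart:ui' as ui;

// Builds a shader from a runtime-stage asset. fromAsset decodes the stage for
// the current backend (SkSL, Metal, OpenGLES, or Vulkan) and records how many
// float uniforms and samplers the program expects.
Future<ui.FragmentShader> buildShader(ui.Image image) async {
  final ui.FragmentProgram program =
      await ui.FragmentProgram.fromAsset('shaders/example.frag');
  final ui.FragmentShader shader = program.fragmentShader();
  // Float uniforms are addressed by flat index, in declaration order.
  shader.setFloat(0, 0.5);
  // Sampler uniforms are addressed by their own index.
  shader.setImageSampler(0, image);
  return shader;
}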
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/common/task_runners.h" #include "flutter/fml/mapping.h" #include "flutter/fml/synchronization/waitable_event.h" #include "flutter/impeller/core/allocator.h" #include "flutter/impeller/core/device_buffer.h" #include "flutter/impeller/geometry/size.h" #include "flutter/impeller/renderer/context.h" #include "flutter/lib/ui/painting/image_decoder.h" #include "flutter/lib/ui/painting/image_decoder_impeller.h" #include "flutter/lib/ui/painting/image_decoder_no_gl_unittests.h" #include "flutter/lib/ui/painting/image_decoder_skia.h" #include "flutter/lib/ui/painting/multi_frame_codec.h" #include "flutter/runtime/dart_vm.h" #include "flutter/runtime/dart_vm_lifecycle.h" #include "flutter/testing/dart_isolate_runner.h" #include "flutter/testing/elf_loader.h" #include "flutter/testing/fixture_test.h" #include "flutter/testing/post_task_sync.h" #include "flutter/testing/test_dart_native_resolver.h" #include "flutter/testing/test_gl_surface.h" #include "flutter/testing/testing.h" #include "fml/logging.h" #include "impeller/renderer/command_queue.h" #include "third_party/skia/include/codec/SkCodecAnimation.h" #include "third_party/skia/include/core/SkData.h" #include "third_party/skia/include/core/SkImage.h" #include "third_party/skia/include/core/SkImageInfo.h" #include "third_party/skia/include/core/SkSize.h" #include "third_party/skia/include/encode/SkPngEncoder.h" // CREATE_NATIVE_ENTRY is leaky by design // NOLINTBEGIN(clang-analyzer-core.StackAddressEscape) namespace impeller { class TestImpellerContext : public impeller::Context { public: TestImpellerContext() = default; BackendType GetBackendType() const override { return BackendType::kMetal; } std::string DescribeGpuModel() const override { return "TestGpu"; } bool IsValid() const override { return true; } const std::shared_ptr<const Capabilities>& GetCapabilities() const override { return capabilities_; } std::shared_ptr<Allocator> GetResourceAllocator() const override { return std::make_shared<TestImpellerAllocator>(); } std::shared_ptr<ShaderLibrary> GetShaderLibrary() const override { return nullptr; } std::shared_ptr<SamplerLibrary> GetSamplerLibrary() const override { return nullptr; } std::shared_ptr<PipelineLibrary> GetPipelineLibrary() const override { return nullptr; } std::shared_ptr<CommandQueue> GetCommandQueue() const override { FML_UNREACHABLE(); } std::shared_ptr<CommandBuffer> CreateCommandBuffer() const override { command_buffer_count_ += 1; return nullptr; } void Shutdown() override {} mutable size_t command_buffer_count_ = 0; private: std::shared_ptr<const Capabilities> capabilities_; }; } // namespace impeller namespace flutter { namespace testing { class TestIOManager final : public IOManager { public: explicit TestIOManager(const fml::RefPtr<fml::TaskRunner>& task_runner, bool has_gpu_context = true) : gl_surface_(SkISize::Make(1, 1)), impeller_context_(std::make_shared<impeller::TestImpellerContext>()), gl_context_(has_gpu_context ? gl_surface_.CreateGrContext() : nullptr), weak_gl_context_factory_( has_gpu_context ? 
std::make_unique<fml::WeakPtrFactory<GrDirectContext>>( gl_context_.get()) : nullptr), unref_queue_(fml::MakeRefCounted<SkiaUnrefQueue>( task_runner, fml::TimeDelta::FromNanoseconds(0), gl_context_)), runner_(task_runner), is_gpu_disabled_sync_switch_(std::make_shared<fml::SyncSwitch>()), weak_factory_(this) { FML_CHECK(task_runner->RunsTasksOnCurrentThread()) << "The IO manager must be initialized its primary task runner. The " "test harness may not be set up correctly/safely."; weak_prototype_ = weak_factory_.GetWeakPtr(); } ~TestIOManager() override { fml::AutoResetWaitableEvent latch; fml::TaskRunner::RunNowOrPostTask(runner_, [&latch, queue = unref_queue_]() { queue->Drain(); latch.Signal(); }); latch.Wait(); } // |IOManager| fml::WeakPtr<IOManager> GetWeakIOManager() const override { return weak_prototype_; } // |IOManager| fml::WeakPtr<GrDirectContext> GetResourceContext() const override { return weak_gl_context_factory_ ? weak_gl_context_factory_->GetWeakPtr() : fml::WeakPtr<GrDirectContext>{}; } // |IOManager| fml::RefPtr<flutter::SkiaUnrefQueue> GetSkiaUnrefQueue() const override { return unref_queue_; } // |IOManager| std::shared_ptr<const fml::SyncSwitch> GetIsGpuDisabledSyncSwitch() override { did_access_is_gpu_disabled_sync_switch_ = true; return is_gpu_disabled_sync_switch_; } // |IOManager| std::shared_ptr<impeller::Context> GetImpellerContext() const override { return impeller_context_; } void SetGpuDisabled(bool disabled) { is_gpu_disabled_sync_switch_->SetSwitch(disabled); } bool did_access_is_gpu_disabled_sync_switch_ = false; private: TestGLSurface gl_surface_; std::shared_ptr<impeller::Context> impeller_context_; sk_sp<GrDirectContext> gl_context_; std::unique_ptr<fml::WeakPtrFactory<GrDirectContext>> weak_gl_context_factory_; fml::RefPtr<SkiaUnrefQueue> unref_queue_; fml::WeakPtr<TestIOManager> weak_prototype_; fml::RefPtr<fml::TaskRunner> runner_; std::shared_ptr<fml::SyncSwitch> is_gpu_disabled_sync_switch_; fml::WeakPtrFactory<TestIOManager> weak_factory_; FML_DISALLOW_COPY_AND_ASSIGN(TestIOManager); }; class ImageDecoderFixtureTest : public FixtureTest {}; TEST_F(ImageDecoderFixtureTest, CanCreateImageDecoder) { auto loop = fml::ConcurrentMessageLoop::Create(); auto thread_task_runner = CreateNewThread(); TaskRunners runners(GetCurrentTestName(), // label thread_task_runner, // platform thread_task_runner, // raster thread_task_runner, // ui thread_task_runner // io ); PostTaskSync(runners.GetIOTaskRunner(), [&]() { TestIOManager manager(runners.GetIOTaskRunner()); Settings settings; auto decoder = ImageDecoder::Make(settings, runners, loop->GetTaskRunner(), manager.GetWeakIOManager(), std::make_shared<fml::SyncSwitch>()); ASSERT_NE(decoder, nullptr); }); } /// An Image generator that pretends it can't recognize the data it was given. 
class UnknownImageGenerator : public ImageGenerator { public: UnknownImageGenerator() : info_(SkImageInfo::MakeUnknown()){}; ~UnknownImageGenerator() = default; const SkImageInfo& GetInfo() { return info_; } unsigned int GetFrameCount() const { return 1; } unsigned int GetPlayCount() const { return 1; } const ImageGenerator::FrameInfo GetFrameInfo(unsigned int frame_index) { return {std::nullopt, 0, SkCodecAnimation::DisposalMethod::kKeep}; } SkISize GetScaledDimensions(float scale) { return SkISize::Make(info_.width(), info_.height()); } bool GetPixels(const SkImageInfo& info, void* pixels, size_t row_bytes, unsigned int frame_index, std::optional<unsigned int> prior_frame) { return false; }; private: SkImageInfo info_; }; TEST_F(ImageDecoderFixtureTest, InvalidImageResultsError) { auto loop = fml::ConcurrentMessageLoop::Create(); auto thread_task_runner = CreateNewThread(); TaskRunners runners(GetCurrentTestName(), // label thread_task_runner, // platform thread_task_runner, // raster thread_task_runner, // ui thread_task_runner // io ); fml::AutoResetWaitableEvent latch; thread_task_runner->PostTask([&]() { TestIOManager manager(runners.GetIOTaskRunner()); Settings settings; auto decoder = ImageDecoder::Make(settings, runners, loop->GetTaskRunner(), manager.GetWeakIOManager(), std::make_shared<fml::SyncSwitch>()); auto data = flutter::testing::OpenFixtureAsSkData("ThisDoesNotExist.jpg"); ASSERT_FALSE(data); fml::RefPtr<ImageDescriptor> image_descriptor = fml::MakeRefCounted<ImageDescriptor>( std::move(data), std::make_unique<UnknownImageGenerator>()); ImageDecoder::ImageResult callback = [&](const sk_sp<DlImage>& image, const std::string& decode_error) { ASSERT_TRUE(runners.GetUITaskRunner()->RunsTasksOnCurrentThread()); ASSERT_FALSE(image); latch.Signal(); }; decoder->Decode(image_descriptor, 0, 0, callback); }); latch.Wait(); } TEST_F(ImageDecoderFixtureTest, ValidImageResultsInSuccess) { auto loop = fml::ConcurrentMessageLoop::Create(); TaskRunners runners(GetCurrentTestName(), // label CreateNewThread("platform"), // platform CreateNewThread("raster"), // raster CreateNewThread("ui"), // ui CreateNewThread("io") // io ); fml::AutoResetWaitableEvent latch; std::unique_ptr<TestIOManager> io_manager; auto release_io_manager = [&]() { io_manager.reset(); latch.Signal(); }; auto decode_image = [&]() { Settings settings; std::unique_ptr<ImageDecoder> image_decoder = ImageDecoder::Make( settings, runners, loop->GetTaskRunner(), io_manager->GetWeakIOManager(), std::make_shared<fml::SyncSwitch>()); auto data = flutter::testing::OpenFixtureAsSkData("DashInNooglerHat.jpg"); ASSERT_TRUE(data); ASSERT_GE(data->size(), 0u); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>( std::move(data), std::move(generator)); ImageDecoder::ImageResult callback = [&](const sk_sp<DlImage>& image, const std::string& decode_error) { ASSERT_TRUE(runners.GetUITaskRunner()->RunsTasksOnCurrentThread()); ASSERT_TRUE(image && image->skia_image()); EXPECT_TRUE(io_manager->did_access_is_gpu_disabled_sync_switch_); runners.GetIOTaskRunner()->PostTask(release_io_manager); }; EXPECT_FALSE(io_manager->did_access_is_gpu_disabled_sync_switch_); image_decoder->Decode(descriptor, descriptor->width(), descriptor->height(), callback); }; auto set_up_io_manager_and_decode = [&]() { io_manager = std::make_unique<TestIOManager>(runners.GetIOTaskRunner()); 
runners.GetUITaskRunner()->PostTask(decode_image); }; runners.GetIOTaskRunner()->PostTask(set_up_io_manager_and_decode); latch.Wait(); } TEST_F(ImageDecoderFixtureTest, ImpellerUploadToSharedNoGpu) { #if !IMPELLER_SUPPORTS_RENDERING GTEST_SKIP() << "Impeller only test."; #endif // IMPELLER_SUPPORTS_RENDERING auto no_gpu_access_context = std::make_shared<impeller::TestImpellerContext>(); auto gpu_disabled_switch = std::make_shared<fml::SyncSwitch>(true); auto info = SkImageInfo::Make(10, 10, SkColorType::kRGBA_8888_SkColorType, SkAlphaType::kPremul_SkAlphaType); auto bitmap = std::make_shared<SkBitmap>(); bitmap->allocPixels(info, 10 * 4); impeller::DeviceBufferDescriptor desc; desc.size = bitmap->computeByteSize(); auto buffer = std::make_shared<impeller::TestImpellerDeviceBuffer>(desc); auto result = ImageDecoderImpeller::UploadTextureToPrivate( no_gpu_access_context, buffer, info, bitmap, gpu_disabled_switch); ASSERT_EQ(no_gpu_access_context->command_buffer_count_, 0ul); ASSERT_EQ(result.second, ""); result = ImageDecoderImpeller::UploadTextureToStorage( no_gpu_access_context, bitmap, gpu_disabled_switch, impeller::StorageMode::kHostVisible, true); ASSERT_EQ(no_gpu_access_context->command_buffer_count_, 0ul); ASSERT_EQ(result.second, ""); } TEST_F(ImageDecoderFixtureTest, ImpellerNullColorspace) { auto info = SkImageInfo::Make(10, 10, SkColorType::kRGBA_8888_SkColorType, SkAlphaType::kPremul_SkAlphaType); SkBitmap bitmap; bitmap.allocPixels(info, 10 * 4); auto data = SkData::MakeWithoutCopy(bitmap.getPixels(), 10 * 10 * 4); auto image = SkImages::RasterFromBitmap(bitmap); ASSERT_TRUE(image != nullptr); ASSERT_EQ(SkISize::Make(10, 10), image->dimensions()); ASSERT_EQ(nullptr, image->colorSpace()); auto descriptor = fml::MakeRefCounted<ImageDescriptor>( std::move(data), image->imageInfo(), 10 * 4); #if IMPELLER_SUPPORTS_RENDERING std::shared_ptr<impeller::Allocator> allocator = std::make_shared<impeller::TestImpellerAllocator>(); std::optional<DecompressResult> decompressed = ImageDecoderImpeller::DecompressTexture( descriptor.get(), SkISize::Make(100, 100), {100, 100}, /*supports_wide_gamut=*/true, allocator); ASSERT_TRUE(decompressed.has_value()); ASSERT_EQ(decompressed->image_info.colorType(), kRGBA_8888_SkColorType); ASSERT_EQ(decompressed->image_info.colorSpace(), nullptr); #endif // IMPELLER_SUPPORTS_RENDERING } TEST_F(ImageDecoderFixtureTest, ImpellerPixelConversion32F) { auto info = SkImageInfo::Make(10, 10, SkColorType::kRGBA_F32_SkColorType, SkAlphaType::kUnpremul_SkAlphaType); SkBitmap bitmap; bitmap.allocPixels(info, 10 * 16); auto data = SkData::MakeWithoutCopy(bitmap.getPixels(), 10 * 10 * 16); auto image = SkImages::RasterFromBitmap(bitmap); ASSERT_TRUE(image != nullptr); ASSERT_EQ(SkISize::Make(10, 10), image->dimensions()); ASSERT_EQ(nullptr, image->colorSpace()); auto descriptor = fml::MakeRefCounted<ImageDescriptor>( std::move(data), image->imageInfo(), 10 * 16); #if IMPELLER_SUPPORTS_RENDERING std::shared_ptr<impeller::Allocator> allocator = std::make_shared<impeller::TestImpellerAllocator>(); std::optional<DecompressResult> decompressed = ImageDecoderImpeller::DecompressTexture( descriptor.get(), SkISize::Make(100, 100), {100, 100}, /*supports_wide_gamut=*/true, allocator); ASSERT_TRUE(decompressed.has_value()); ASSERT_EQ(decompressed->image_info.colorType(), kRGBA_F16_SkColorType); ASSERT_EQ(decompressed->image_info.colorSpace(), nullptr); #endif // IMPELLER_SUPPORTS_RENDERING } TEST_F(ImageDecoderFixtureTest, ImpellerWideGamutDisplayP3Opaque) { auto data = 
flutter::testing::OpenFixtureAsSkData("DisplayP3Logo.jpg"); auto image = SkImages::DeferredFromEncodedData(data); ASSERT_TRUE(image != nullptr); ASSERT_EQ(SkISize::Make(100, 100), image->dimensions()); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>(std::move(data), std::move(generator)); #if IMPELLER_SUPPORTS_RENDERING std::shared_ptr<impeller::Allocator> allocator = std::make_shared<impeller::TestImpellerAllocator>(); std::optional<DecompressResult> wide_result = ImageDecoderImpeller::DecompressTexture( descriptor.get(), SkISize::Make(100, 100), {100, 100}, /*supports_wide_gamut=*/true, allocator); ASSERT_TRUE(wide_result.has_value()); ASSERT_EQ(wide_result->image_info.colorType(), kBGR_101010x_XR_SkColorType); ASSERT_TRUE(wide_result->image_info.colorSpace()->isSRGB()); const SkPixmap& wide_pixmap = wide_result->sk_bitmap->pixmap(); const uint32_t* pixel_ptr = static_cast<const uint32_t*>(wide_pixmap.addr()); bool found_deep_red = false; for (int i = 0; i < wide_pixmap.width() * wide_pixmap.height(); ++i) { uint32_t pixel = *pixel_ptr++; float blue = DecodeBGR10((pixel >> 0) & 0x3ff); float green = DecodeBGR10((pixel >> 10) & 0x3ff); float red = DecodeBGR10((pixel >> 20) & 0x3ff); if (fabsf(red - 1.0931f) < 0.01f && fabsf(green - -0.2268f) < 0.01f && fabsf(blue - -0.1501f) < 0.01f) { found_deep_red = true; break; } } ASSERT_TRUE(found_deep_red); std::optional<DecompressResult> narrow_result = ImageDecoderImpeller::DecompressTexture( descriptor.get(), SkISize::Make(100, 100), {100, 100}, /*supports_wide_gamut=*/false, allocator); ASSERT_TRUE(narrow_result.has_value()); ASSERT_EQ(narrow_result->image_info.colorType(), kRGBA_8888_SkColorType); #endif // IMPELLER_SUPPORTS_RENDERING } TEST_F(ImageDecoderFixtureTest, ImpellerNonWideGamut) { auto data = flutter::testing::OpenFixtureAsSkData("Horizontal.jpg"); auto image = SkImages::DeferredFromEncodedData(data); ASSERT_TRUE(image != nullptr); ASSERT_EQ(SkISize::Make(600, 200), image->dimensions()); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>(std::move(data), std::move(generator)); #if IMPELLER_SUPPORTS_RENDERING std::shared_ptr<impeller::Allocator> allocator = std::make_shared<impeller::TestImpellerAllocator>(); std::optional<DecompressResult> result = ImageDecoderImpeller::DecompressTexture( descriptor.get(), SkISize::Make(600, 200), {600, 200}, /*supports_wide_gamut=*/true, allocator); ASSERT_TRUE(result.has_value()); ASSERT_EQ(result->image_info.colorType(), kRGBA_8888_SkColorType); #endif // IMPELLER_SUPPORTS_RENDERING } TEST_F(ImageDecoderFixtureTest, ExifDataIsRespectedOnDecode) { auto loop = fml::ConcurrentMessageLoop::Create(); TaskRunners runners(GetCurrentTestName(), // label CreateNewThread("platform"), // platform CreateNewThread("raster"), // raster CreateNewThread("ui"), // ui CreateNewThread("io") // io ); fml::AutoResetWaitableEvent latch; std::unique_ptr<IOManager> io_manager; auto release_io_manager = [&]() { io_manager.reset(); latch.Signal(); }; SkISize decoded_size = SkISize::MakeEmpty(); auto decode_image = [&]() { Settings settings; std::unique_ptr<ImageDecoder> image_decoder = ImageDecoder::Make( settings, runners, loop->GetTaskRunner(), io_manager->GetWeakIOManager(), std::make_shared<fml::SyncSwitch>()); auto data = 
flutter::testing::OpenFixtureAsSkData("Horizontal.jpg"); ASSERT_TRUE(data); ASSERT_GE(data->size(), 0u); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>( std::move(data), std::move(generator)); ImageDecoder::ImageResult callback = [&](const sk_sp<DlImage>& image, const std::string& decode_error) { ASSERT_TRUE(runners.GetUITaskRunner()->RunsTasksOnCurrentThread()); ASSERT_TRUE(image && image->skia_image()); decoded_size = image->skia_image()->dimensions(); runners.GetIOTaskRunner()->PostTask(release_io_manager); }; image_decoder->Decode(descriptor, descriptor->width(), descriptor->height(), callback); }; auto set_up_io_manager_and_decode = [&]() { io_manager = std::make_unique<TestIOManager>(runners.GetIOTaskRunner()); runners.GetUITaskRunner()->PostTask(decode_image); }; runners.GetIOTaskRunner()->PostTask(set_up_io_manager_and_decode); latch.Wait(); ASSERT_EQ(decoded_size.width(), 600); ASSERT_EQ(decoded_size.height(), 200); } TEST_F(ImageDecoderFixtureTest, CanDecodeWithoutAGPUContext) { auto loop = fml::ConcurrentMessageLoop::Create(); TaskRunners runners(GetCurrentTestName(), // label CreateNewThread("platform"), // platform CreateNewThread("raster"), // raster CreateNewThread("ui"), // ui CreateNewThread("io") // io ); fml::AutoResetWaitableEvent latch; std::unique_ptr<IOManager> io_manager; auto release_io_manager = [&]() { io_manager.reset(); latch.Signal(); }; auto decode_image = [&]() { Settings settings; std::unique_ptr<ImageDecoder> image_decoder = ImageDecoder::Make( settings, runners, loop->GetTaskRunner(), io_manager->GetWeakIOManager(), std::make_shared<fml::SyncSwitch>()); auto data = flutter::testing::OpenFixtureAsSkData("DashInNooglerHat.jpg"); ASSERT_TRUE(data); ASSERT_GE(data->size(), 0u); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>( std::move(data), std::move(generator)); ImageDecoder::ImageResult callback = [&](const sk_sp<DlImage>& image, const std::string& decode_error) { ASSERT_TRUE(runners.GetUITaskRunner()->RunsTasksOnCurrentThread()); ASSERT_TRUE(image && image->skia_image()); runners.GetIOTaskRunner()->PostTask(release_io_manager); }; image_decoder->Decode(descriptor, descriptor->width(), descriptor->height(), callback); }; auto set_up_io_manager_and_decode = [&]() { io_manager = std::make_unique<TestIOManager>(runners.GetIOTaskRunner(), false); runners.GetUITaskRunner()->PostTask(decode_image); }; runners.GetIOTaskRunner()->PostTask(set_up_io_manager_and_decode); latch.Wait(); } TEST_F(ImageDecoderFixtureTest, CanDecodeWithResizes) { const auto image_dimensions = SkImages::DeferredFromEncodedData( flutter::testing::OpenFixtureAsSkData("DashInNooglerHat.jpg")) ->dimensions(); ASSERT_FALSE(image_dimensions.isEmpty()); ASSERT_NE(image_dimensions.width(), image_dimensions.height()); auto loop = fml::ConcurrentMessageLoop::Create(); TaskRunners runners(GetCurrentTestName(), // label CreateNewThread("platform"), // platform CreateNewThread("raster"), // raster CreateNewThread("ui"), // ui CreateNewThread("io") // io ); fml::AutoResetWaitableEvent latch; std::unique_ptr<IOManager> io_manager; std::unique_ptr<ImageDecoder> image_decoder; // Setup the IO manager. 
PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager = std::make_unique<TestIOManager>(runners.GetIOTaskRunner()); }); // Setup the image decoder. PostTaskSync(runners.GetUITaskRunner(), [&]() { Settings settings; image_decoder = ImageDecoder::Make(settings, runners, loop->GetTaskRunner(), io_manager->GetWeakIOManager(), std::make_shared<fml::SyncSwitch>()); }); // Setup a generic decoding utility that gives us the final decoded size. auto decoded_size = [&](uint32_t target_width, uint32_t target_height) -> SkISize { SkISize final_size = SkISize::MakeEmpty(); runners.GetUITaskRunner()->PostTask([&]() { auto data = flutter::testing::OpenFixtureAsSkData("DashInNooglerHat.jpg"); ASSERT_TRUE(data); ASSERT_GE(data->size(), 0u); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>( std::move(data), std::move(generator)); ImageDecoder::ImageResult callback = [&](const sk_sp<DlImage>& image, const std::string& decode_error) { ASSERT_TRUE(runners.GetUITaskRunner()->RunsTasksOnCurrentThread()); ASSERT_TRUE(image && image->skia_image()); final_size = image->skia_image()->dimensions(); latch.Signal(); }; image_decoder->Decode(descriptor, target_width, target_height, callback); }); latch.Wait(); return final_size; }; ASSERT_EQ(SkISize::Make(3024, 4032), image_dimensions); ASSERT_EQ(decoded_size(3024, 4032), image_dimensions); ASSERT_EQ(decoded_size(100, 100), SkISize::Make(100, 100)); // Destroy the IO manager PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager.reset(); }); // Destroy the image decoder PostTaskSync(runners.GetUITaskRunner(), [&]() { image_decoder.reset(); }); } // Verifies https://skia-review.googlesource.com/c/skia/+/259161 is present in // Flutter. TEST(ImageDecoderTest, VerifyCodecRepeatCountsForGifAndWebPAreConsistentWithLoopCounts) { auto gif_mapping = flutter::testing::OpenFixtureAsSkData("hello_loop_2.gif"); auto webp_mapping = flutter::testing::OpenFixtureAsSkData("hello_loop_2.webp"); ASSERT_TRUE(gif_mapping); ASSERT_TRUE(webp_mapping); ImageGeneratorRegistry registry; auto gif_generator = registry.CreateCompatibleGenerator(gif_mapping); auto webp_generator = registry.CreateCompatibleGenerator(webp_mapping); ASSERT_TRUE(gif_generator); ASSERT_TRUE(webp_generator); // Both fixtures have a loop count of 2. 
ASSERT_EQ(gif_generator->GetPlayCount(), static_cast<unsigned int>(2)); ASSERT_EQ(webp_generator->GetPlayCount(), static_cast<unsigned int>(2)); } TEST(ImageDecoderTest, VerifySimpleDecoding) { auto data = flutter::testing::OpenFixtureAsSkData("Horizontal.jpg"); auto image = SkImages::DeferredFromEncodedData(data); ASSERT_TRUE(image != nullptr); ASSERT_EQ(600, image->width()); ASSERT_EQ(200, image->height()); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>(std::move(data), std::move(generator)); auto compressed_image = ImageDecoderSkia::ImageFromCompressedData( descriptor.get(), 6, 2, fml::tracing::TraceFlow("")); ASSERT_EQ(compressed_image->width(), 6); ASSERT_EQ(compressed_image->height(), 2); ASSERT_EQ(compressed_image->alphaType(), kOpaque_SkAlphaType); #if IMPELLER_SUPPORTS_RENDERING std::shared_ptr<impeller::Allocator> allocator = std::make_shared<impeller::TestImpellerAllocator>(); auto result_1 = ImageDecoderImpeller::DecompressTexture( descriptor.get(), SkISize::Make(6, 2), {100, 100}, /*supports_wide_gamut=*/false, allocator); ASSERT_EQ(result_1.sk_bitmap->width(), 6); ASSERT_EQ(result_1.sk_bitmap->height(), 2); auto result_2 = ImageDecoderImpeller::DecompressTexture( descriptor.get(), SkISize::Make(60, 20), {10, 10}, /*supports_wide_gamut=*/false, allocator); ASSERT_EQ(result_2.sk_bitmap->width(), 10); ASSERT_EQ(result_2.sk_bitmap->height(), 10); #endif // IMPELLER_SUPPORTS_RENDERING } TEST(ImageDecoderTest, ImagesWithTransparencyArePremulAlpha) { auto data = flutter::testing::OpenFixtureAsSkData("heart_end.png"); ASSERT_TRUE(data); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>(std::move(data), std::move(generator)); auto compressed_image = ImageDecoderSkia::ImageFromCompressedData( descriptor.get(), 250, 250, fml::tracing::TraceFlow("")); ASSERT_TRUE(compressed_image); ASSERT_EQ(compressed_image->width(), 250); ASSERT_EQ(compressed_image->height(), 250); ASSERT_EQ(compressed_image->alphaType(), kPremul_SkAlphaType); } TEST(ImageDecoderTest, VerifySubpixelDecodingPreservesExifOrientation) { auto data = flutter::testing::OpenFixtureAsSkData("Horizontal.jpg"); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> generator = registry.CreateCompatibleGenerator(data); ASSERT_TRUE(generator); auto descriptor = fml::MakeRefCounted<ImageDescriptor>(data, std::move(generator)); // If Exif metadata is ignored, the height and width will be swapped because // "Rotate 90 CW" is what is encoded there. 
ASSERT_EQ(600, descriptor->width()); ASSERT_EQ(200, descriptor->height()); auto image = SkImages::DeferredFromEncodedData(data); ASSERT_TRUE(image != nullptr); ASSERT_EQ(600, image->width()); ASSERT_EQ(200, image->height()); auto decode = [descriptor](uint32_t target_width, uint32_t target_height) { return ImageDecoderSkia::ImageFromCompressedData( descriptor.get(), target_width, target_height, fml::tracing::TraceFlow("")); }; auto expected_data = flutter::testing::OpenFixtureAsSkData("Horizontal.png"); ASSERT_TRUE(expected_data != nullptr); ASSERT_FALSE(expected_data->isEmpty()); auto assert_image = [&](auto decoded_image, const std::string& decode_error) { ASSERT_EQ(decoded_image->dimensions(), SkISize::Make(300, 100)); sk_sp<SkData> encoded = SkPngEncoder::Encode(nullptr, decoded_image.get(), {}); ASSERT_TRUE(encoded->equals(expected_data.get())); }; assert_image(decode(300, 100), {}); } TEST_F(ImageDecoderFixtureTest, MultiFrameCodecCanBeCollectedBeforeIOTasksFinish) { // This test verifies that the MultiFrameCodec safely shares state between // tasks on the IO and UI runners, and does not allow unsafe memory access if // the UI object is collected while the IO thread still has pending decode // work. This could happen in a real application if the engine is collected // while a multi-frame image is decoding. To exercise this, the test: // - Starts a Dart VM // - Latches the IO task runner // - Create a MultiFrameCodec for an animated gif pointed to a callback // in the Dart fixture // - Calls getNextFrame on the UI task runner // - Collects the MultiFrameCodec object before unlatching the IO task // runner. // - Unlatches the IO task runner auto settings = CreateSettingsForFixture(); auto vm_ref = DartVMRef::Create(settings); auto vm_data = vm_ref.GetVMData(); auto gif_mapping = flutter::testing::OpenFixtureAsSkData("hello_loop_2.gif"); ASSERT_TRUE(gif_mapping); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> gif_generator = registry.CreateCompatibleGenerator(gif_mapping); ASSERT_TRUE(gif_generator); TaskRunners runners(GetCurrentTestName(), // label CreateNewThread("platform"), // platform CreateNewThread("raster"), // raster CreateNewThread("ui"), // ui CreateNewThread("io") // io ); fml::AutoResetWaitableEvent io_latch; std::unique_ptr<TestIOManager> io_manager; // Setup the IO manager. PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager = std::make_unique<TestIOManager>(runners.GetIOTaskRunner()); }); auto isolate = RunDartCodeInIsolate(vm_ref, settings, runners, "main", {}, GetDefaultKernelFilePath(), io_manager->GetWeakIOManager()); // Latch the IO task runner. 
runners.GetIOTaskRunner()->PostTask([&]() { io_latch.Wait(); }); PostTaskSync(runners.GetUITaskRunner(), [&]() { fml::AutoResetWaitableEvent isolate_latch; fml::RefPtr<MultiFrameCodec> codec; EXPECT_TRUE(isolate->RunInIsolateScope([&]() -> bool { Dart_Handle library = Dart_RootLibrary(); if (Dart_IsError(library)) { isolate_latch.Signal(); return false; } Dart_Handle closure = Dart_GetField(library, Dart_NewStringFromCString("frameCallback")); if (Dart_IsError(closure) || !Dart_IsClosure(closure)) { isolate_latch.Signal(); return false; } codec = fml::MakeRefCounted<MultiFrameCodec>(std::move(gif_generator)); codec->getNextFrame(closure); codec = nullptr; isolate_latch.Signal(); return true; })); isolate_latch.Wait(); EXPECT_FALSE(codec); io_latch.Signal(); }); // Destroy the IO manager PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager.reset(); }); } TEST_F(ImageDecoderFixtureTest, MultiFrameCodecDidAccessGpuDisabledSyncSwitch) { auto settings = CreateSettingsForFixture(); auto vm_ref = DartVMRef::Create(settings); auto vm_data = vm_ref.GetVMData(); auto gif_mapping = flutter::testing::OpenFixtureAsSkData("hello_loop_2.gif"); ASSERT_TRUE(gif_mapping); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> gif_generator = registry.CreateCompatibleGenerator(gif_mapping); ASSERT_TRUE(gif_generator); TaskRunners runners(GetCurrentTestName(), // label CreateNewThread("platform"), // platform CreateNewThread("raster"), // raster CreateNewThread("ui"), // ui CreateNewThread("io") // io ); std::unique_ptr<TestIOManager> io_manager; fml::RefPtr<MultiFrameCodec> codec; fml::AutoResetWaitableEvent latch; auto validate_frame_callback = [&latch](Dart_NativeArguments args) { EXPECT_FALSE(Dart_IsNull(Dart_GetNativeArgument(args, 0))); latch.Signal(); }; AddNativeCallback("ValidateFrameCallback", CREATE_NATIVE_ENTRY(validate_frame_callback)); // Setup the IO manager. 
PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager = std::make_unique<TestIOManager>(runners.GetIOTaskRunner()); }); auto isolate = RunDartCodeInIsolate(vm_ref, settings, runners, "main", {}, GetDefaultKernelFilePath(), io_manager->GetWeakIOManager()); PostTaskSync(runners.GetUITaskRunner(), [&]() { fml::AutoResetWaitableEvent isolate_latch; EXPECT_TRUE(isolate->RunInIsolateScope([&]() -> bool { Dart_Handle library = Dart_RootLibrary(); if (Dart_IsError(library)) { isolate_latch.Signal(); return false; } Dart_Handle closure = Dart_GetField(library, Dart_NewStringFromCString("frameCallback")); if (Dart_IsError(closure) || !Dart_IsClosure(closure)) { isolate_latch.Signal(); return false; } EXPECT_FALSE(io_manager->did_access_is_gpu_disabled_sync_switch_); codec = fml::MakeRefCounted<MultiFrameCodec>(std::move(gif_generator)); codec->getNextFrame(closure); isolate_latch.Signal(); return true; })); isolate_latch.Wait(); }); PostTaskSync(runners.GetIOTaskRunner(), [&]() { EXPECT_TRUE(io_manager->did_access_is_gpu_disabled_sync_switch_); }); latch.Wait(); // Destroy the Isolate isolate = nullptr; // Destroy the MultiFrameCodec PostTaskSync(runners.GetUITaskRunner(), [&]() { codec = nullptr; }); // Destroy the IO manager PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager.reset(); }); } TEST_F(ImageDecoderFixtureTest, MultiFrameCodecProducesATextureEvenIfGPUIsDisabledOnImpeller) { auto settings = CreateSettingsForFixture(); settings.enable_impeller = true; auto vm_ref = DartVMRef::Create(settings); auto vm_data = vm_ref.GetVMData(); auto gif_mapping = flutter::testing::OpenFixtureAsSkData("hello_loop_2.gif"); ASSERT_TRUE(gif_mapping); ImageGeneratorRegistry registry; std::shared_ptr<ImageGenerator> gif_generator = registry.CreateCompatibleGenerator(gif_mapping); ASSERT_TRUE(gif_generator); TaskRunners runners(GetCurrentTestName(), // label CreateNewThread("platform"), // platform CreateNewThread("raster"), // raster CreateNewThread("ui"), // ui CreateNewThread("io") // io ); std::unique_ptr<TestIOManager> io_manager; fml::RefPtr<MultiFrameCodec> codec; fml::AutoResetWaitableEvent latch; auto validate_frame_callback = [&latch](Dart_NativeArguments args) { EXPECT_FALSE(Dart_IsNull(Dart_GetNativeArgument(args, 0))); latch.Signal(); }; AddNativeCallback("ValidateFrameCallback", CREATE_NATIVE_ENTRY(validate_frame_callback)); // Setup the IO manager. PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager = std::make_unique<TestIOManager>(runners.GetIOTaskRunner()); // Mark GPU disabled. 
io_manager->SetGpuDisabled(true); }); auto isolate = RunDartCodeInIsolate(vm_ref, settings, runners, "main", {}, GetDefaultKernelFilePath(), io_manager->GetWeakIOManager()); PostTaskSync(runners.GetUITaskRunner(), [&]() { fml::AutoResetWaitableEvent isolate_latch; EXPECT_TRUE(isolate->RunInIsolateScope([&]() -> bool { Dart_Handle library = Dart_RootLibrary(); if (Dart_IsError(library)) { isolate_latch.Signal(); return false; } Dart_Handle closure = Dart_GetField(library, Dart_NewStringFromCString("frameCallback")); if (Dart_IsError(closure) || !Dart_IsClosure(closure)) { isolate_latch.Signal(); return false; } EXPECT_FALSE(io_manager->did_access_is_gpu_disabled_sync_switch_); codec = fml::MakeRefCounted<MultiFrameCodec>(std::move(gif_generator)); codec->getNextFrame(closure); isolate_latch.Signal(); return true; })); isolate_latch.Wait(); }); PostTaskSync(runners.GetIOTaskRunner(), [&]() { EXPECT_TRUE(io_manager->did_access_is_gpu_disabled_sync_switch_); }); latch.Wait(); // Destroy the Isolate isolate = nullptr; // Destroy the MultiFrameCodec PostTaskSync(runners.GetUITaskRunner(), [&]() { codec = nullptr; }); // Destroy the IO manager PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager.reset(); }); } TEST_F(ImageDecoderFixtureTest, NullCheckBuffer) { auto context = std::make_shared<impeller::TestImpellerContext>(); auto allocator = ImpellerAllocator(context->GetResourceAllocator()); EXPECT_FALSE(allocator.allocPixelRef(nullptr)); } } // namespace testing } // namespace flutter // NOLINTEND(clang-analyzer-core.StackAddressEscape)
engine/lib/ui/painting/image_decoder_unittests.cc/0
{ "file_path": "engine/lib/ui/painting/image_decoder_unittests.cc", "repo_id": "engine", "token_count": 15889 }
234
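The decoder tests above lean on a PostTaskSync(runner, closure) helper to run work on another task runner and block until it finishes. A minimal sketch of such a helper, assuming it is simply a latch wrapped around fml::TaskRunner::PostTask (the real helper is defined elsewhere in the shell test support code):

#include <functional>

#include "flutter/fml/memory/ref_ptr.h"
#include "flutter/fml/synchronization/waitable_event.h"
#include "flutter/fml/task_runner.h"

// Post |task| to |runner| and block the calling thread until it has run.
static void PostTaskSyncSketch(const fml::RefPtr<fml::TaskRunner>& runner,
                               const std::function<void()>& task) {
  fml::AutoResetWaitableEvent latch;
  runner->PostTask([&task, &latch]() {
    task();          // Runs on the target task runner.
    latch.Signal();  // Unblocks the caller.
  });
  latch.Wait();
}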
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "image_generator_apng.h" #include <cstddef> #include <cstring> #include "flutter/fml/logging.h" #include "third_party/skia/include/codec/SkCodec.h" #include "third_party/skia/include/codec/SkCodecAnimation.h" #include "third_party/skia/include/core/SkAlphaType.h" #include "third_party/skia/include/core/SkColorType.h" #include "third_party/skia/include/core/SkImageInfo.h" #include "third_party/skia/include/core/SkStream.h" #include "third_party/zlib/zlib.h" // For crc32 namespace flutter { APNGImageGenerator::~APNGImageGenerator() = default; APNGImageGenerator::APNGImageGenerator(sk_sp<SkData>& data, SkImageInfo& image_info, APNGImage&& default_image, unsigned int frame_count, unsigned int play_count, const void* next_chunk_p, const std::vector<uint8_t>& header) : data_(data), image_info_(image_info), frame_count_(frame_count), play_count_(play_count), first_frame_index_(default_image.frame_info.has_value() ? 0 : 1), next_chunk_p_(next_chunk_p), header_(header) { images_.push_back(std::move(default_image)); } const SkImageInfo& APNGImageGenerator::GetInfo() { return image_info_; } unsigned int APNGImageGenerator::GetFrameCount() const { return frame_count_; } unsigned int APNGImageGenerator::GetPlayCount() const { return frame_count_ > 1 ? play_count_ : 1; } const ImageGenerator::FrameInfo APNGImageGenerator::GetFrameInfo( unsigned int frame_index) { unsigned int image_index = first_frame_index_ + frame_index; if (!DemuxToImageIndex(image_index)) { return {}; } auto frame_info = images_[image_index].frame_info; if (frame_info.has_value()) { return frame_info.value(); } return {}; } SkISize APNGImageGenerator::GetScaledDimensions(float desired_scale) { return image_info_.dimensions(); } bool APNGImageGenerator::GetPixels(const SkImageInfo& info, void* pixels, size_t row_bytes, unsigned int frame_index, std::optional<unsigned int> prior_frame) { FML_DCHECK(images_.size() > 0); unsigned int image_index = first_frame_index_ + frame_index; //---------------------------------------------------------------------------- /// 1. Demux the frame from the APNG stream. /// if (!DemuxToImageIndex(image_index)) { FML_DLOG(ERROR) << "Couldn't demux image at index " << image_index << " (frame index: " << frame_index << ") from APNG stream."; return RenderDefaultImage(info, pixels, row_bytes); } //---------------------------------------------------------------------------- /// 2. Decode the frame. /// APNGImage& frame = images_[image_index]; SkImageInfo frame_info = frame.codec->getInfo(); auto frame_row_bytes = frame_info.bytesPerPixel() * frame_info.width(); if (frame.pixels.empty()) { frame.pixels.resize(frame_row_bytes * frame_info.height()); SkCodec::Result result = frame.codec->getPixels( frame.codec->getInfo(), frame.pixels.data(), frame_row_bytes); if (result != SkCodec::kSuccess) { FML_DLOG(ERROR) << "Failed to decode image at index " << image_index << " (frame index: " << frame_index << ") of APNG. SkCodec::Result: " << result; return RenderDefaultImage(info, pixels, row_bytes); } } if (!frame.frame_info.has_value()) { FML_DLOG(ERROR) << "Failed to decode image at index " << image_index << " (frame index: " << frame_index << ") of APNG due to the frame missing data (frame_info)."; return false; } //---------------------------------------------------------------------------- /// 3. Composite the frame onto the canvas. 
/// if (info.colorType() != kN32_SkColorType) { FML_DLOG(ERROR) << "Failed to composite image at index " << image_index << " (frame index: " << frame_index << ") of APNG due to the destination surface having an " "unsupported color type."; return false; } if (frame_info.colorType() != kN32_SkColorType) { FML_DLOG(ERROR) << "Failed to composite image at index " << image_index << " (frame index: " << frame_index << ") of APNG due to the frame having an unsupported color type."; return false; } // Regardless of the byte order (RGBA vs BGRA), the blending operations are // the same. struct Pixel { uint8_t channel[4]; uint8_t GetAlpha() { return channel[3]; } void Premultiply() { for (int i = 0; i < 3; i++) { channel[i] = channel[i] * GetAlpha() / 0xFF; } } void Unpremultiply() { if (GetAlpha() == 0) { channel[0] = channel[1] = channel[2] = 0; return; } for (int i = 0; i < 3; i++) { channel[i] = channel[i] * 0xFF / GetAlpha(); } } }; FML_DCHECK(frame_info.bytesPerPixel() == sizeof(Pixel)); bool result = true; if (frame.frame_info->blend_mode == SkCodecAnimation::Blend::kSrc) { SkPixmap src_pixmap(frame_info, frame.pixels.data(), frame_row_bytes); uint8_t* dst_pixels = static_cast<uint8_t*>(pixels) + frame.y_offset * row_bytes + frame.x_offset * frame_info.bytesPerPixel(); result = src_pixmap.readPixels(info, dst_pixels, row_bytes); if (!result) { FML_DLOG(ERROR) << "Failed to copy pixels at index " << image_index << " (frame index: " << frame_index << ") of APNG."; } } else if (frame.frame_info->blend_mode == SkCodecAnimation::Blend::kSrcOver) { for (int y = 0; y < frame_info.height(); y++) { auto src_row = frame.pixels.data() + y * frame_row_bytes; auto dst_row = static_cast<uint8_t*>(pixels) + (y + frame.y_offset) * row_bytes + frame.x_offset * frame_info.bytesPerPixel(); for (int x = 0; x < frame_info.width(); x++) { auto x_offset_bytes = x * frame_info.bytesPerPixel(); Pixel src = *reinterpret_cast<Pixel*>(src_row + x_offset_bytes); Pixel* dst_p = reinterpret_cast<Pixel*>(dst_row + x_offset_bytes); Pixel dst = *dst_p; // Ensure both colors are premultiplied for the blending operation. if (info.alphaType() == kUnpremul_SkAlphaType) { dst.Premultiply(); } if (frame_info.alphaType() == kUnpremul_SkAlphaType) { src.Premultiply(); } for (int i = 0; i < 4; i++) { dst.channel[i] = src.channel[i] + dst.channel[i] * (0xFF - src.GetAlpha()) / 0xFF; } // The final color is premultiplied. Unpremultiply to match the // backdrop surface if necessary. if (info.alphaType() == kUnpremul_SkAlphaType) { dst.Unpremultiply(); } *dst_p = dst; } } } return result; } std::unique_ptr<ImageGenerator> APNGImageGenerator::MakeFromData( sk_sp<SkData> data) { // Ensure the buffer is large enough to at least contain the PNG signature // and a chunk header. if (data->size() < sizeof(kPngSignature) + sizeof(ChunkHeader)) { return nullptr; } // Validate the full PNG signature. const uint8_t* data_p = static_cast<const uint8_t*>(data.get()->data()); if (memcmp(data_p, kPngSignature, sizeof(kPngSignature))) { return nullptr; } // Validate the header chunk. const ChunkHeader* chunk = reinterpret_cast<const ChunkHeader*>(data_p + 8); if (!IsValidChunkHeader(data_p, data->size(), chunk) || chunk->get_data_length() != sizeof(ImageHeaderChunkData) || chunk->get_type() != kImageHeaderChunkType) { return nullptr; } // Walk the chunks to find the "animation control" chunk. If an "image data" // chunk is found first, this PNG is not animated. 
while (true) { chunk = GetNextChunk(data_p, data->size(), chunk); if (chunk == nullptr) { return nullptr; } if (chunk->get_type() == kImageDataChunkType) { return nullptr; } if (chunk->get_type() == kAnimationControlChunkType) { break; } } const AnimationControlChunkData* animation_data = CastChunkData<AnimationControlChunkData>(chunk); // Extract the header signature and chunks to prepend when demuxing images. std::optional<std::vector<uint8_t>> header; const void* first_chunk_p; std::tie(header, first_chunk_p) = ExtractHeader(data_p, data->size()); if (!header.has_value()) { return nullptr; } // Demux the first image in the APNG chunk stream in order to interpret // extent and blending info immediately. std::optional<APNGImage> default_image; const void* next_chunk_p; std::tie(default_image, next_chunk_p) = DemuxNextImage(data_p, data->size(), header.value(), first_chunk_p); if (!default_image.has_value()) { return nullptr; } unsigned int play_count = animation_data->get_num_plays(); if (play_count == 0) { play_count = kInfinitePlayCount; } SkImageInfo image_info = default_image.value().codec->getInfo(); return std::unique_ptr<APNGImageGenerator>( new APNGImageGenerator(data, image_info, std::move(default_image.value()), animation_data->get_num_frames(), play_count, next_chunk_p, header.value())); } bool APNGImageGenerator::IsValidChunkHeader(const void* buffer, size_t size, const ChunkHeader* chunk) { // Ensure the chunk doesn't start before the beginning of the buffer. if (reinterpret_cast<const uint8_t*>(chunk) < static_cast<const uint8_t*>(buffer)) { return false; } // Ensure the buffer is large enough to contain at least the chunk header. if (reinterpret_cast<const uint8_t*>(chunk) + sizeof(ChunkHeader) > static_cast<const uint8_t*>(buffer) + size) { return false; } // Ensure the buffer is large enough to contain the chunk's given data size // and CRC. const uint8_t* chunk_end = reinterpret_cast<const uint8_t*>(chunk) + GetChunkSize(chunk); if (chunk_end > static_cast<const uint8_t*>(buffer) + size) { return false; } // Ensure the 4-byte type only contains ISO 646 letters. uint32_t type = chunk->get_type(); for (int i = 0; i < 4; i++) { uint8_t c = type >> i * 8 & 0xFF; if (!((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'))) { return false; } } return true; } const APNGImageGenerator::ChunkHeader* APNGImageGenerator::GetNextChunk( const void* buffer, size_t size, const ChunkHeader* current_chunk) { FML_DCHECK((uint8_t*)current_chunk + sizeof(ChunkHeader) <= (uint8_t*)buffer + size); const ChunkHeader* next_chunk = reinterpret_cast<const ChunkHeader*>( reinterpret_cast<const uint8_t*>(current_chunk) + GetChunkSize(current_chunk)); if (!IsValidChunkHeader(buffer, size, next_chunk)) { return nullptr; } return next_chunk; } std::pair<std::optional<std::vector<uint8_t>>, const void*> APNGImageGenerator::ExtractHeader(const void* buffer_p, size_t buffer_size) { std::vector<uint8_t> result(sizeof(kPngSignature)); memcpy(result.data(), kPngSignature, sizeof(kPngSignature)); const ChunkHeader* chunk = reinterpret_cast<const ChunkHeader*>( static_cast<const uint8_t*>(buffer_p) + sizeof(kPngSignature)); // Validate the first chunk to ensure it's safe to read. if (!IsValidChunkHeader(buffer_p, buffer_size, chunk)) { return std::make_pair(std::nullopt, nullptr); } // Walk the chunks and copy in the non-APNG chunks until we come across a // frame or image chunk. 
do { if (chunk->get_type() != kAnimationControlChunkType) { size_t chunk_size = GetChunkSize(chunk); result.resize(result.size() + chunk_size); memcpy(result.data() + result.size() - chunk_size, chunk, chunk_size); } chunk = GetNextChunk(buffer_p, buffer_size, chunk); } while (chunk != nullptr && chunk->get_type() != kFrameControlChunkType && chunk->get_type() != kImageDataChunkType && chunk->get_type() != kFrameDataChunkType); // nullptr means the end of the buffer was reached, which means there's no // frame or image data, so just return nothing because the PNG isn't even // valid. if (chunk == nullptr) { return std::make_pair(std::nullopt, nullptr); } return std::make_pair(result, chunk); } std::pair<std::optional<APNGImageGenerator::APNGImage>, const void*> APNGImageGenerator::DemuxNextImage(const void* buffer_p, size_t buffer_size, const std::vector<uint8_t>& header, const void* chunk_p) { const ChunkHeader* chunk = reinterpret_cast<const ChunkHeader*>(chunk_p); // Validate the given chunk to ensure it's safe to read. if (!IsValidChunkHeader(buffer_p, buffer_size, chunk)) { return std::make_pair(std::nullopt, nullptr); } // Expect frame data to begin at fdAT or IDAT if (chunk->get_type() != kFrameControlChunkType && chunk->get_type() != kImageDataChunkType) { return std::make_pair(std::nullopt, nullptr); } APNGImage result; const FrameControlChunkData* control_data = nullptr; // The presence of an fcTL chunk is optional for the first (default) image // of a PNG. Both cases are handled in APNGImage. if (chunk->get_type() == kFrameControlChunkType) { control_data = CastChunkData<FrameControlChunkData>(chunk); ImageGenerator::FrameInfo frame_info; switch (control_data->get_blend_op()) { case 0: // APNG_BLEND_OP_SOURCE frame_info.blend_mode = SkCodecAnimation::Blend::kSrc; break; case 1: // APNG_BLEND_OP_OVER frame_info.blend_mode = SkCodecAnimation::Blend::kSrcOver; break; default: return std::make_pair(std::nullopt, nullptr); } SkIRect frame_rect = SkIRect::MakeXYWH( control_data->get_x_offset(), control_data->get_y_offset(), control_data->get_width(), control_data->get_height()); switch (control_data->get_dispose_op()) { case 0: // APNG_DISPOSE_OP_NONE frame_info.disposal_method = SkCodecAnimation::DisposalMethod::kKeep; break; case 1: // APNG_DISPOSE_OP_BACKGROUND frame_info.disposal_method = SkCodecAnimation::DisposalMethod::kRestoreBGColor; frame_info.disposal_rect = frame_rect; break; case 2: // APNG_DISPOSE_OP_PREVIOUS frame_info.disposal_method = SkCodecAnimation::DisposalMethod::kRestorePrevious; break; default: return std::make_pair(std::nullopt, nullptr); } uint16_t denominator = control_data->get_delay_den() == 0 ? 100 : control_data->get_delay_den(); frame_info.duration = static_cast<int>(control_data->get_delay_num() * 1000.f / denominator); result.frame_info = frame_info; result.x_offset = control_data->get_x_offset(); result.y_offset = control_data->get_y_offset(); } std::vector<const ChunkHeader*> image_chunks; size_t chunk_space = 0; // Walk the chunks until the next frame, end chunk, or an invalid chunk is // reached, recording the chunks to copy along with their required space. // TODO(bdero): Validate that IDAT/fdAT chunks are contiguous. // TODO(bdero): Validate the acTL/fcTL/fdAT sequence number ordering. do { if (chunk->get_type() != kFrameControlChunkType) { image_chunks.push_back(chunk); chunk_space += GetChunkSize(chunk); // fdAT chunks are converted into IDAT chunks when demuxed. 
The only // difference between these chunk types is that fdAT has a 4 byte // sequence number prepended to its data, so subtract that space from // the buffer. if (chunk->get_type() == kFrameDataChunkType) { chunk_space -= 4; } } chunk = GetNextChunk(buffer_p, buffer_size, chunk); } while (chunk != nullptr && chunk->get_type() != kFrameControlChunkType && chunk->get_type() != kImageTrailerChunkType); const uint8_t end_chunk[] = {0, 0, 0, 0, 'I', 'E', 'N', 'D', 0xAE, 0x42, 0x60, 0x82}; // Form a buffer for the new encoded PNG and copy the chunks in. sk_sp<SkData> new_png_buffer = SkData::MakeUninitialized( header.size() + chunk_space + sizeof(end_chunk)); { uint8_t* write_cursor = static_cast<uint8_t*>(new_png_buffer->writable_data()); // Copy the signature/header chunks memcpy(write_cursor, header.data(), header.size()); // If this is a frame, override the width/height in the IHDR chunk. if (control_data) { ChunkHeader* ihdr_header = reinterpret_cast<ChunkHeader*>(write_cursor + sizeof(kPngSignature)); ImageHeaderChunkData* ihdr_data = const_cast<ImageHeaderChunkData*>( CastChunkData<ImageHeaderChunkData>(ihdr_header)); ihdr_data->set_width(control_data->get_width()); ihdr_data->set_height(control_data->get_height()); ihdr_header->UpdateChunkCrc32(); } write_cursor += header.size(); // Copy the image data/ancillary chunks. for (const ChunkHeader* c : image_chunks) { if (c->get_type() == kFrameDataChunkType) { // Write a new IDAT chunk header. ChunkHeader* write_header = reinterpret_cast<ChunkHeader*>(write_cursor); write_header->set_data_length(c->get_data_length() - 4); write_header->set_type(kImageDataChunkType); write_cursor += sizeof(ChunkHeader); // Copy all of the data except for the 4 byte sequence number at the // beginning of the fdAT data. memcpy(write_cursor, reinterpret_cast<const uint8_t*>(c) + sizeof(ChunkHeader) + 4, write_header->get_data_length()); write_cursor += write_header->get_data_length(); // Recompute the chunk CRC. write_header->UpdateChunkCrc32(); write_cursor += 4; } else { size_t chunk_size = GetChunkSize(c); memcpy(write_cursor, c, chunk_size); write_cursor += chunk_size; } } // Copy the trailer chunk. memcpy(write_cursor, &end_chunk, sizeof(end_chunk)); } SkCodec::Result header_parse_result; result.codec = SkCodec::MakeFromStream(SkMemoryStream::Make(new_png_buffer), &header_parse_result); if (header_parse_result != SkCodec::Result::kSuccess) { FML_DLOG(ERROR) << "Failed to parse image header during APNG demux. SkCodec::Result: " << header_parse_result; return std::make_pair(std::nullopt, nullptr); } if (chunk->get_type() == kImageTrailerChunkType) { chunk = nullptr; } return std::make_pair(std::optional<APNGImage>{std::move(result)}, chunk); } bool APNGImageGenerator::DemuxNextImageInternal() { if (next_chunk_p_ == nullptr) { return false; } std::optional<APNGImage> image; const void* data_p = const_cast<void*>(data_.get()->data()); std::tie(image, next_chunk_p_) = DemuxNextImage(data_p, data_->size(), header_, next_chunk_p_); if (!image.has_value() || !image->frame_info.has_value()) { return false; } auto last_frame_info = images_.back().frame_info; if (!last_frame_info.has_value()) { return false; } if (images_.size() > first_frame_index_ && (last_frame_info->disposal_method == SkCodecAnimation::DisposalMethod::kKeep || last_frame_info->disposal_method == SkCodecAnimation::DisposalMethod::kRestoreBGColor)) { // Mark the required frame as the previous frame in all cases. 
image->frame_info->required_frame = images_.size() - 1; } else if (images_.size() > (first_frame_index_ + 1) && last_frame_info->disposal_method == SkCodecAnimation::DisposalMethod::kRestorePrevious) { // Mark the required frame as the last previous frame // It is not valid if there are 2 or above frames set |disposal_method| to // |kRestorePrevious|. But it also works in MultiFrameCodec. image->frame_info->required_frame = images_.size() - 2; } // Calling SkCodec::getInfo at least once prior to decoding is mandatory. SkImageInfo info = image.value().codec->getInfo(); FML_DCHECK(info.colorInfo() == image_info_.colorInfo()); images_.push_back(std::move(image.value())); auto default_info = images_[0].codec->getInfo(); if (info.colorType() != default_info.colorType()) { return false; } return true; } bool APNGImageGenerator::DemuxToImageIndex(unsigned int image_index) { // If the requested image doesn't exist yet, demux more frames from the APNG // stream. if (image_index >= images_.size()) { while (DemuxNextImageInternal() && image_index >= images_.size()) { } if (image_index >= images_.size()) { // The chunk stream was exhausted before the image was found. return false; } } return true; } void APNGImageGenerator::ChunkHeader::UpdateChunkCrc32() { uint32_t* crc_p = reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + sizeof(ChunkHeader) + get_data_length()); *crc_p = fml::BigEndianToArch(ComputeChunkCrc32()); } uint32_t APNGImageGenerator::ChunkHeader::ComputeChunkCrc32() { // Exclude the length field at the beginning of the chunk header. size_t length = sizeof(ChunkHeader) - 4 + get_data_length(); uint8_t* chunk_data_p = reinterpret_cast<uint8_t*>(this) + 4; uint32_t crc = 0; // zlib's crc32 can only take 16 bits at a time for the length, but PNG // supports a 32 bit chunk length, so looping is necessary here. // Note that crc32 is always called at least once, even if the chunk has an // empty data section. do { uint16_t length16 = length; if (length16 == 0 && length > 0) { length16 = std::numeric_limits<uint16_t>::max(); } crc = crc32(crc, chunk_data_p, length16); length -= length16; chunk_data_p += length16; } while (length > 0); return crc; } bool APNGImageGenerator::RenderDefaultImage(const SkImageInfo& info, void* pixels, size_t row_bytes) { SkCodec::Result result = images_[0].codec->getPixels(info, pixels, row_bytes); if (result != SkCodec::kSuccess) { FML_DLOG(ERROR) << "Failed to decode the APNG's default/fallback image. " "SkCodec::Result: " << result; return false; } return true; } } // namespace flutter
engine/lib/ui/painting/image_generator_apng.cc/0
{ "file_path": "engine/lib/ui/painting/image_generator_apng.cc", "repo_id": "engine", "token_count": 9583 }
235
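The kSrcOver branch of APNGImageGenerator::GetPixels blends each premultiplied channel as src + dst * (0xFF - srcAlpha) / 0xFF. A standalone sketch of that per-channel operation, for illustration only:

#include <cstdint>

// out = src + dst * (0xFF - src_alpha) / 0xFF, with every channel premultiplied.
static uint8_t BlendSrcOverChannelSketch(uint8_t src,
                                         uint8_t dst,
                                         uint8_t src_alpha) {
  return static_cast<uint8_t>(src + dst * (0xFF - src_alpha) / 0xFF);
}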
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "flutter/lib/ui/painting/path.h" #include <cmath> #include "flutter/lib/ui/floating_point.h" #include "flutter/lib/ui/painting/matrix.h" #include "flutter/lib/ui/ui_dart_state.h" #include "third_party/tonic/converter/dart_converter.h" #include "third_party/tonic/dart_args.h" #include "third_party/tonic/dart_binding_macros.h" #include "third_party/tonic/dart_library_natives.h" using tonic::ToDart; namespace flutter { typedef CanvasPath Path; IMPLEMENT_WRAPPERTYPEINFO(ui, Path); CanvasPath::CanvasPath() : path_tracker_(UIDartState::Current()->GetVolatilePathTracker()), tracked_path_(std::make_shared<VolatilePathTracker::TrackedPath>()) { FML_DCHECK(path_tracker_); resetVolatility(); } CanvasPath::~CanvasPath() = default; void CanvasPath::resetVolatility() { if (!tracked_path_->tracking_volatility) { mutable_path().setIsVolatile(true); tracked_path_->frame_count = 0; tracked_path_->tracking_volatility = true; path_tracker_->Track(tracked_path_); } } int CanvasPath::getFillType() { return static_cast<int>(path().getFillType()); } void CanvasPath::setFillType(int fill_type) { mutable_path().setFillType(static_cast<SkPathFillType>(fill_type)); resetVolatility(); } void CanvasPath::moveTo(double x, double y) { mutable_path().moveTo(SafeNarrow(x), SafeNarrow(y)); resetVolatility(); } void CanvasPath::relativeMoveTo(double x, double y) { mutable_path().rMoveTo(SafeNarrow(x), SafeNarrow(y)); resetVolatility(); } void CanvasPath::lineTo(double x, double y) { mutable_path().lineTo(SafeNarrow(x), SafeNarrow(y)); resetVolatility(); } void CanvasPath::relativeLineTo(double x, double y) { mutable_path().rLineTo(SafeNarrow(x), SafeNarrow(y)); resetVolatility(); } void CanvasPath::quadraticBezierTo(double x1, double y1, double x2, double y2) { mutable_path().quadTo(SafeNarrow(x1), SafeNarrow(y1), SafeNarrow(x2), SafeNarrow(y2)); resetVolatility(); } void CanvasPath::relativeQuadraticBezierTo(double x1, double y1, double x2, double y2) { mutable_path().rQuadTo(SafeNarrow(x1), SafeNarrow(y1), SafeNarrow(x2), SafeNarrow(y2)); resetVolatility(); } void CanvasPath::cubicTo(double x1, double y1, double x2, double y2, double x3, double y3) { mutable_path().cubicTo(SafeNarrow(x1), SafeNarrow(y1), SafeNarrow(x2), SafeNarrow(y2), SafeNarrow(x3), SafeNarrow(y3)); resetVolatility(); } void CanvasPath::relativeCubicTo(double x1, double y1, double x2, double y2, double x3, double y3) { mutable_path().rCubicTo(SafeNarrow(x1), SafeNarrow(y1), SafeNarrow(x2), SafeNarrow(y2), SafeNarrow(x3), SafeNarrow(y3)); resetVolatility(); } void CanvasPath::conicTo(double x1, double y1, double x2, double y2, double w) { mutable_path().conicTo(SafeNarrow(x1), SafeNarrow(y1), SafeNarrow(x2), SafeNarrow(y2), SafeNarrow(w)); resetVolatility(); } void CanvasPath::relativeConicTo(double x1, double y1, double x2, double y2, double w) { mutable_path().rConicTo(SafeNarrow(x1), SafeNarrow(y1), SafeNarrow(x2), SafeNarrow(y2), SafeNarrow(w)); resetVolatility(); } void CanvasPath::arcTo(double left, double top, double right, double bottom, double startAngle, double sweepAngle, bool forceMoveTo) { mutable_path().arcTo( SkRect::MakeLTRB(SafeNarrow(left), SafeNarrow(top), SafeNarrow(right), SafeNarrow(bottom)), SafeNarrow(startAngle) * 180.0f / static_cast<float>(M_PI), SafeNarrow(sweepAngle) * 180.0f / static_cast<float>(M_PI), forceMoveTo); resetVolatility(); } void 
CanvasPath::arcToPoint(double arcEndX, double arcEndY, double radiusX, double radiusY, double xAxisRotation, bool isLargeArc, bool isClockwiseDirection) { const auto arcSize = isLargeArc ? SkPath::ArcSize::kLarge_ArcSize : SkPath::ArcSize::kSmall_ArcSize; const auto direction = isClockwiseDirection ? SkPathDirection::kCW : SkPathDirection::kCCW; mutable_path().arcTo(SafeNarrow(radiusX), SafeNarrow(radiusY), SafeNarrow(xAxisRotation), arcSize, direction, SafeNarrow(arcEndX), SafeNarrow(arcEndY)); resetVolatility(); } void CanvasPath::relativeArcToPoint(double arcEndDeltaX, double arcEndDeltaY, double radiusX, double radiusY, double xAxisRotation, bool isLargeArc, bool isClockwiseDirection) { const auto arcSize = isLargeArc ? SkPath::ArcSize::kLarge_ArcSize : SkPath::ArcSize::kSmall_ArcSize; const auto direction = isClockwiseDirection ? SkPathDirection::kCW : SkPathDirection::kCCW; mutable_path().rArcTo(SafeNarrow(radiusX), SafeNarrow(radiusY), SafeNarrow(xAxisRotation), arcSize, direction, SafeNarrow(arcEndDeltaX), SafeNarrow(arcEndDeltaY)); resetVolatility(); } void CanvasPath::addRect(double left, double top, double right, double bottom) { mutable_path().addRect(SkRect::MakeLTRB(SafeNarrow(left), SafeNarrow(top), SafeNarrow(right), SafeNarrow(bottom))); resetVolatility(); } void CanvasPath::addOval(double left, double top, double right, double bottom) { mutable_path().addOval(SkRect::MakeLTRB(SafeNarrow(left), SafeNarrow(top), SafeNarrow(right), SafeNarrow(bottom))); resetVolatility(); } void CanvasPath::addArc(double left, double top, double right, double bottom, double startAngle, double sweepAngle) { mutable_path().addArc( SkRect::MakeLTRB(SafeNarrow(left), SafeNarrow(top), SafeNarrow(right), SafeNarrow(bottom)), SafeNarrow(startAngle) * 180.0f / static_cast<float>(M_PI), SafeNarrow(sweepAngle) * 180.0f / static_cast<float>(M_PI)); resetVolatility(); } void CanvasPath::addPolygon(const tonic::Float32List& points, bool close) { mutable_path().addPoly(reinterpret_cast<const SkPoint*>(points.data()), points.num_elements() / 2, close); resetVolatility(); } void CanvasPath::addRRect(const RRect& rrect) { mutable_path().addRRect(rrect.sk_rrect); resetVolatility(); } void CanvasPath::addPath(CanvasPath* path, double dx, double dy) { if (!path) { Dart_ThrowException(ToDart("Path.addPath called with non-genuine Path.")); return; } mutable_path().addPath(path->path(), SafeNarrow(dx), SafeNarrow(dy), SkPath::kAppend_AddPathMode); resetVolatility(); } void CanvasPath::addPathWithMatrix(CanvasPath* path, double dx, double dy, Dart_Handle matrix4_handle) { tonic::Float64List matrix4(matrix4_handle); if (!path) { matrix4.Release(); Dart_ThrowException( ToDart("Path.addPathWithMatrix called with non-genuine Path.")); return; } SkMatrix matrix = ToSkMatrix(matrix4); matrix4.Release(); matrix.setTranslateX(matrix.getTranslateX() + SafeNarrow(dx)); matrix.setTranslateY(matrix.getTranslateY() + SafeNarrow(dy)); mutable_path().addPath(path->path(), matrix, SkPath::kAppend_AddPathMode); resetVolatility(); } void CanvasPath::extendWithPath(CanvasPath* path, double dx, double dy) { if (!path) { Dart_ThrowException( ToDart("Path.extendWithPath called with non-genuine Path.")); return; } mutable_path().addPath(path->path(), SafeNarrow(dx), SafeNarrow(dy), SkPath::kExtend_AddPathMode); resetVolatility(); } void CanvasPath::extendWithPathAndMatrix(CanvasPath* path, double dx, double dy, Dart_Handle matrix4_handle) { tonic::Float64List matrix4(matrix4_handle); if (!path) { matrix4.Release(); Dart_ThrowException( 
ToDart("Path.addPathWithMatrix called with non-genuine Path.")); return; } SkMatrix matrix = ToSkMatrix(matrix4); matrix4.Release(); matrix.setTranslateX(matrix.getTranslateX() + SafeNarrow(dx)); matrix.setTranslateY(matrix.getTranslateY() + SafeNarrow(dy)); mutable_path().addPath(path->path(), matrix, SkPath::kExtend_AddPathMode); resetVolatility(); } void CanvasPath::close() { mutable_path().close(); resetVolatility(); } void CanvasPath::reset() { mutable_path().reset(); resetVolatility(); } bool CanvasPath::contains(double x, double y) { return path().contains(SafeNarrow(x), SafeNarrow(y)); } void CanvasPath::shift(Dart_Handle path_handle, double dx, double dy) { fml::RefPtr<CanvasPath> path = Create(path_handle); auto& other_mutable_path = path->mutable_path(); mutable_path().offset(SafeNarrow(dx), SafeNarrow(dy), &other_mutable_path); resetVolatility(); } void CanvasPath::transform(Dart_Handle path_handle, Dart_Handle matrix4_handle) { tonic::Float64List matrix4(matrix4_handle); auto sk_matrix = ToSkMatrix(matrix4); matrix4.Release(); fml::RefPtr<CanvasPath> path = Create(path_handle); auto& other_mutable_path = path->mutable_path(); mutable_path().transform(sk_matrix, &other_mutable_path); } tonic::Float32List CanvasPath::getBounds() { tonic::Float32List rect(Dart_NewTypedData(Dart_TypedData_kFloat32, 4)); const SkRect& bounds = path().getBounds(); rect[0] = bounds.left(); rect[1] = bounds.top(); rect[2] = bounds.right(); rect[3] = bounds.bottom(); return rect; } bool CanvasPath::op(CanvasPath* path1, CanvasPath* path2, int operation) { bool result = Op(path1->path(), path2->path(), static_cast<SkPathOp>(operation), &tracked_path_->path); resetVolatility(); return result; } void CanvasPath::clone(Dart_Handle path_handle) { fml::RefPtr<CanvasPath> path = Create(path_handle); // per Skia docs, this will create a fast copy // data is shared until the source path or dest path are mutated path->mutable_path() = this->path(); } } // namespace flutter
engine/lib/ui/painting/path.cc/0
{ "file_path": "engine/lib/ui/painting/path.cc", "repo_id": "engine", "token_count": 5522 }
236
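CanvasPath::arcTo, addArc, and related methods receive angles in radians from Dart and convert them to the degrees Skia expects. A sketch of that conversion in isolation (SafeNarrow is omitted here for brevity):

#include <cmath>

// Dart supplies startAngle/sweepAngle in radians; SkPath::arcTo and
// SkPath::addArc expect degrees.
static float RadiansToDegreesSketch(double radians) {
  return static_cast<float>(radians) * 180.0f / static_cast<float>(M_PI);
}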
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_LIB_UI_PAINTING_SHADER_H_ #define FLUTTER_LIB_UI_PAINTING_SHADER_H_ #include "flutter/display_list/effects/dl_color_source.h" #include "flutter/lib/ui/ui_dart_state.h" namespace flutter { class Shader : public RefCountedDartWrappable<Shader> { DEFINE_WRAPPERTYPEINFO(); FML_FRIEND_MAKE_REF_COUNTED(Shader); public: ~Shader() override; virtual std::shared_ptr<DlColorSource> shader(DlImageSampling) = 0; protected: Shader() {} }; } // namespace flutter #endif // FLUTTER_LIB_UI_PAINTING_SHADER_H_
engine/lib/ui/painting/shader.h/0
{ "file_path": "engine/lib/ui/painting/shader.h", "repo_id": "engine", "token_count": 265 }
237
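Stripped of the Dart-wrapper and ref-counting machinery, shader.h is the classic abstract-base pattern: a pure-virtual shader() accessor plus a protected constructor so only concrete subclasses can be instantiated. A self-contained sketch of that shape, with ColorSourceStub standing in for DlColorSource purely for illustration:

#include <memory>

struct ColorSourceStub {};  // Stand-in for DlColorSource, for illustration only.

class ShaderBaseSketch {
 public:
  virtual ~ShaderBaseSketch() = default;
  virtual std::shared_ptr<ColorSourceStub> shader() = 0;

 protected:
  ShaderBaseSketch() = default;  // Only subclasses may construct.
};

class SolidShaderSketch : public ShaderBaseSketch {
 public:
  std::shared_ptr<ColorSourceStub> shader() override {
    // A real subclass (gradient, image shader, ...) builds its color source here.
    return std::make_shared<ColorSourceStub>();
  }
};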
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_LIB_UI_SEMANTICS_SEMANTICS_NODE_H_ #define FLUTTER_LIB_UI_SEMANTICS_SEMANTICS_NODE_H_ #include <cstdint> #include <string> #include <unordered_map> #include <vector> #include "third_party/skia/include/core/SkM44.h" #include "third_party/skia/include/core/SkRect.h" #include "flutter/lib/ui/semantics/string_attribute.h" namespace flutter { // Must match the SemanticsAction enum in semantics.dart and in each of the // embedders. enum class SemanticsAction : int32_t { kTap = 1 << 0, kLongPress = 1 << 1, kScrollLeft = 1 << 2, kScrollRight = 1 << 3, kScrollUp = 1 << 4, kScrollDown = 1 << 5, kIncrease = 1 << 6, kDecrease = 1 << 7, kShowOnScreen = 1 << 8, kMoveCursorForwardByCharacter = 1 << 9, kMoveCursorBackwardByCharacter = 1 << 10, kSetSelection = 1 << 11, kCopy = 1 << 12, kCut = 1 << 13, kPaste = 1 << 14, kDidGainAccessibilityFocus = 1 << 15, kDidLoseAccessibilityFocus = 1 << 16, kCustomAction = 1 << 17, kDismiss = 1 << 18, kMoveCursorForwardByWord = 1 << 19, kMoveCursorBackwardByWord = 1 << 20, kSetText = 1 << 21, }; const int kVerticalScrollSemanticsActions = static_cast<int32_t>(SemanticsAction::kScrollUp) | static_cast<int32_t>(SemanticsAction::kScrollDown); const int kHorizontalScrollSemanticsActions = static_cast<int32_t>(SemanticsAction::kScrollLeft) | static_cast<int32_t>(SemanticsAction::kScrollRight); const int kScrollableSemanticsActions = kVerticalScrollSemanticsActions | kHorizontalScrollSemanticsActions; /// C/C++ representation of `SemanticsFlags` defined in /// `lib/ui/semantics.dart`. ///\warning This must match the `SemanticsFlags` enum in /// `lib/ui/semantics.dart`. /// See also: /// - file://./../../../lib/ui/semantics.dart enum class SemanticsFlags : int32_t { kHasCheckedState = 1 << 0, kIsChecked = 1 << 1, kIsSelected = 1 << 2, kIsButton = 1 << 3, kIsTextField = 1 << 4, kIsFocused = 1 << 5, kHasEnabledState = 1 << 6, kIsEnabled = 1 << 7, kIsInMutuallyExclusiveGroup = 1 << 8, kIsHeader = 1 << 9, kIsObscured = 1 << 10, kScopesRoute = 1 << 11, kNamesRoute = 1 << 12, kIsHidden = 1 << 13, kIsImage = 1 << 14, kIsLiveRegion = 1 << 15, kHasToggledState = 1 << 16, kIsToggled = 1 << 17, kHasImplicitScrolling = 1 << 18, kIsMultiline = 1 << 19, kIsReadOnly = 1 << 20, kIsFocusable = 1 << 21, kIsLink = 1 << 22, kIsSlider = 1 << 23, kIsKeyboardKey = 1 << 24, kIsCheckStateMixed = 1 << 25, kHasExpandedState = 1 << 26, kIsExpanded = 1 << 27, }; const int kScrollableSemanticsFlags = static_cast<int32_t>(SemanticsFlags::kHasImplicitScrolling); struct SemanticsNode { SemanticsNode(); SemanticsNode(const SemanticsNode& other); ~SemanticsNode(); bool HasAction(SemanticsAction action) const; bool HasFlag(SemanticsFlags flag) const; // Whether this node is for embedded platform views. 
bool IsPlatformViewNode() const; int32_t id = 0; int32_t flags = 0; int32_t actions = 0; int32_t maxValueLength = -1; int32_t currentValueLength = -1; int32_t textSelectionBase = -1; int32_t textSelectionExtent = -1; int32_t platformViewId = -1; int32_t scrollChildren = 0; int32_t scrollIndex = 0; double scrollPosition = std::nan(""); double scrollExtentMax = std::nan(""); double scrollExtentMin = std::nan(""); double elevation = 0.0; double thickness = 0.0; std::string identifier; std::string label; StringAttributes labelAttributes; std::string hint; StringAttributes hintAttributes; std::string value; StringAttributes valueAttributes; std::string increasedValue; StringAttributes increasedValueAttributes; std::string decreasedValue; StringAttributes decreasedValueAttributes; std::string tooltip; int32_t textDirection = 0; // 0=unknown, 1=rtl, 2=ltr SkRect rect = SkRect::MakeEmpty(); // Local space, relative to parent. SkM44 transform = SkM44{}; // Identity std::vector<int32_t> childrenInTraversalOrder; std::vector<int32_t> childrenInHitTestOrder; std::vector<int32_t> customAccessibilityActions; }; // Contains semantic nodes that need to be updated. // // The keys in the map are stable node IDs, and the values contain // semantic information for the node corresponding to the ID. using SemanticsNodeUpdates = std::unordered_map<int32_t, SemanticsNode>; } // namespace flutter #endif // FLUTTER_LIB_UI_SEMANTICS_SEMANTICS_NODE_H_
engine/lib/ui/semantics/semantics_node.h/0
{ "file_path": "engine/lib/ui/semantics/semantics_node.h", "repo_id": "engine", "token_count": 1657 }
238
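SemanticsNode packs its flags and actions into int32 bitmasks, so HasFlag/HasAction and constants like kScrollableSemanticsActions reduce to bitwise tests. A minimal sketch of that check:

#include <cstdint>

// True if any bit of |mask| is set in |bitfield|.
static bool HasBitSketch(int32_t bitfield, int32_t mask) {
  return (bitfield & mask) != 0;
}

// e.g. a node is scrollable when it exposes any scroll action:
//   HasBitSketch(node.actions, kScrollableSemanticsActions)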
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_LIB_UI_TEXT_PARAGRAPH_H_ #define FLUTTER_LIB_UI_TEXT_PARAGRAPH_H_ #include "flutter/fml/message_loop.h" #include "flutter/lib/ui/dart_wrapper.h" #include "flutter/lib/ui/painting/canvas.h" #include "flutter/third_party/txt/src/txt/paragraph.h" namespace flutter { class Paragraph : public RefCountedDartWrappable<Paragraph> { DEFINE_WRAPPERTYPEINFO(); FML_FRIEND_MAKE_REF_COUNTED(Paragraph); public: static void Create(Dart_Handle paragraph_handle, std::unique_ptr<txt::Paragraph> txt_paragraph) { auto paragraph = fml::MakeRefCounted<Paragraph>(std::move(txt_paragraph)); paragraph->AssociateWithDartWrapper(paragraph_handle); } ~Paragraph() override; double width(); double height(); double longestLine(); double minIntrinsicWidth(); double maxIntrinsicWidth(); double alphabeticBaseline(); double ideographicBaseline(); bool didExceedMaxLines(); void layout(double width); void paint(Canvas* canvas, double x, double y); tonic::Float32List getRectsForRange(unsigned start, unsigned end, unsigned boxHeightStyle, unsigned boxWidthStyle); tonic::Float32List getRectsForPlaceholders(); Dart_Handle getPositionForOffset(double dx, double dy); Dart_Handle getGlyphInfoAt(unsigned utf16Offset, Dart_Handle constructor) const; Dart_Handle getClosestGlyphInfo(double dx, double dy, Dart_Handle constructor) const; Dart_Handle getWordBoundary(unsigned offset); Dart_Handle getLineBoundary(unsigned offset); tonic::Float64List computeLineMetrics() const; Dart_Handle getLineMetricsAt(int lineNumber, Dart_Handle constructor) const; size_t getNumberOfLines() const; int getLineNumberAt(size_t utf16Offset) const; void dispose(); private: std::unique_ptr<txt::Paragraph> m_paragraph_; explicit Paragraph(std::unique_ptr<txt::Paragraph> paragraph); }; } // namespace flutter #endif // FLUTTER_LIB_UI_TEXT_PARAGRAPH_H_
engine/lib/ui/text/paragraph.h/0
{ "file_path": "engine/lib/ui/text/paragraph.h", "repo_id": "engine", "token_count": 909 }
239
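Paragraph is a thin wrapper that delegates to the owned txt::Paragraph, and its metrics are only meaningful after layout(width) has run. A sketch of that contract, assuming the wrapped type exposes Layout() and GetHeight() (an assumption made here for illustration):

#include "flutter/third_party/txt/src/txt/paragraph.h"

static double LayoutAndMeasureSketch(txt::Paragraph& paragraph, double width) {
  paragraph.Layout(width);       // Shape and lay out glyphs for the given width.
  return paragraph.GetHeight();  // Only meaningful after Layout().
}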
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #define FML_USED_ON_EMBEDDER #include "flutter/lib/ui/window/platform_configuration.h" #include <memory> #include "flutter/common/task_runners.h" #include "flutter/fml/synchronization/waitable_event.h" #include "flutter/lib/ui/painting/vertices.h" #include "flutter/runtime/dart_vm.h" #include "flutter/shell/common/shell_test.h" #include "flutter/shell/common/thread_host.h" #include "flutter/testing/testing.h" namespace flutter { namespace testing { class PlatformConfigurationTest : public ShellTest {}; TEST_F(PlatformConfigurationTest, Initialization) { auto message_latch = std::make_shared<fml::AutoResetWaitableEvent>(); auto nativeValidateConfiguration = [message_latch](Dart_NativeArguments args) { PlatformConfiguration* configuration = UIDartState::Current()->platform_configuration(); ASSERT_NE(configuration->GetMetrics(0), nullptr); ASSERT_EQ(configuration->GetMetrics(0)->device_pixel_ratio, 1.0); ASSERT_EQ(configuration->GetMetrics(0)->physical_width, 0.0); ASSERT_EQ(configuration->GetMetrics(0)->physical_height, 0.0); message_latch->Signal(); }; Settings settings = CreateSettingsForFixture(); TaskRunners task_runners("test", // label GetCurrentTaskRunner(), // platform CreateNewThread(), // raster CreateNewThread(), // ui CreateNewThread() // io ); AddNativeCallback("ValidateConfiguration", CREATE_NATIVE_ENTRY(nativeValidateConfiguration)); std::unique_ptr<Shell> shell = CreateShell(settings, task_runners); ASSERT_TRUE(shell->IsSetup()); auto run_configuration = RunConfiguration::InferFromSettings(settings); run_configuration.SetEntrypoint("validateConfiguration"); shell->RunEngine(std::move(run_configuration), [&](auto result) { ASSERT_EQ(result, Engine::RunStatus::Success); }); message_latch->Wait(); DestroyShell(std::move(shell), task_runners); } TEST_F(PlatformConfigurationTest, WindowMetricsUpdate) { auto message_latch = std::make_shared<fml::AutoResetWaitableEvent>(); auto nativeValidateConfiguration = [message_latch](Dart_NativeArguments args) { PlatformConfiguration* configuration = UIDartState::Current()->platform_configuration(); ASSERT_NE(configuration->GetMetrics(0), nullptr); bool has_view = configuration->UpdateViewMetrics( 0, ViewportMetrics{2.0, 10.0, 20.0, 22, 0}); ASSERT_TRUE(has_view); ASSERT_EQ(configuration->GetMetrics(0)->device_pixel_ratio, 2.0); ASSERT_EQ(configuration->GetMetrics(0)->physical_width, 10.0); ASSERT_EQ(configuration->GetMetrics(0)->physical_height, 20.0); ASSERT_EQ(configuration->GetMetrics(0)->physical_touch_slop, 22); message_latch->Signal(); }; Settings settings = CreateSettingsForFixture(); TaskRunners task_runners("test", // label GetCurrentTaskRunner(), // platform CreateNewThread(), // raster CreateNewThread(), // ui CreateNewThread() // io ); AddNativeCallback("ValidateConfiguration", CREATE_NATIVE_ENTRY(nativeValidateConfiguration)); std::unique_ptr<Shell> shell = CreateShell(settings, task_runners); ASSERT_TRUE(shell->IsSetup()); auto run_configuration = RunConfiguration::InferFromSettings(settings); run_configuration.SetEntrypoint("validateConfiguration"); shell->RunEngine(std::move(run_configuration), [&](auto result) { ASSERT_EQ(result, Engine::RunStatus::Success); }); message_latch->Wait(); DestroyShell(std::move(shell), task_runners); } TEST_F(PlatformConfigurationTest, GetWindowReturnsNullForNonexistentId) { auto message_latch = 
std::make_shared<fml::AutoResetWaitableEvent>(); auto nativeValidateConfiguration = [message_latch](Dart_NativeArguments args) { PlatformConfiguration* configuration = UIDartState::Current()->platform_configuration(); ASSERT_EQ(configuration->GetMetrics(1), nullptr); ASSERT_EQ(configuration->GetMetrics(2), nullptr); message_latch->Signal(); }; Settings settings = CreateSettingsForFixture(); TaskRunners task_runners("test", // label GetCurrentTaskRunner(), // platform CreateNewThread(), // raster CreateNewThread(), // ui CreateNewThread() // io ); AddNativeCallback("ValidateConfiguration", CREATE_NATIVE_ENTRY(nativeValidateConfiguration)); std::unique_ptr<Shell> shell = CreateShell(settings, task_runners); ASSERT_TRUE(shell->IsSetup()); auto run_configuration = RunConfiguration::InferFromSettings(settings); run_configuration.SetEntrypoint("validateConfiguration"); shell->RunEngine(std::move(run_configuration), [&](auto result) { ASSERT_EQ(result, Engine::RunStatus::Success); }); message_latch->Wait(); DestroyShell(std::move(shell), task_runners); } TEST_F(PlatformConfigurationTest, OnErrorHandlesError) { auto message_latch = std::make_shared<fml::AutoResetWaitableEvent>(); bool did_throw = false; auto finish = [message_latch](Dart_NativeArguments args) { message_latch->Signal(); }; AddNativeCallback("Finish", CREATE_NATIVE_ENTRY(finish)); Settings settings = CreateSettingsForFixture(); settings.unhandled_exception_callback = [&did_throw](const std::string& exception, const std::string& stack_trace) -> bool { did_throw = true; return false; }; TaskRunners task_runners("test", // label GetCurrentTaskRunner(), // platform CreateNewThread(), // raster CreateNewThread(), // ui CreateNewThread() // io ); std::unique_ptr<Shell> shell = CreateShell(settings, task_runners); ASSERT_TRUE(shell->IsSetup()); auto run_configuration = RunConfiguration::InferFromSettings(settings); run_configuration.SetEntrypoint("customOnErrorTrue"); shell->RunEngine(std::move(run_configuration), [&](auto result) { ASSERT_EQ(result, Engine::RunStatus::Success); }); message_latch->Wait(); // Flush the UI task runner to make sure errors that were triggered had a turn // to propagate. 
task_runners.GetUITaskRunner()->PostTask( [&message_latch]() { message_latch->Signal(); }); message_latch->Wait(); ASSERT_FALSE(did_throw); DestroyShell(std::move(shell), task_runners); } TEST_F(PlatformConfigurationTest, OnErrorDoesNotHandleError) { auto message_latch = std::make_shared<fml::AutoResetWaitableEvent>(); std::string ex; std::string st; size_t throw_count = 0; auto finish = [message_latch](Dart_NativeArguments args) { message_latch->Signal(); }; AddNativeCallback("Finish", CREATE_NATIVE_ENTRY(finish)); Settings settings = CreateSettingsForFixture(); settings.unhandled_exception_callback = [&ex, &st, &throw_count](const std::string& exception, const std::string& stack_trace) -> bool { throw_count += 1; ex = exception; st = stack_trace; return true; }; TaskRunners task_runners("test", // label GetCurrentTaskRunner(), // platform CreateNewThread(), // raster CreateNewThread(), // ui CreateNewThread() // io ); std::unique_ptr<Shell> shell = CreateShell(settings, task_runners); ASSERT_TRUE(shell->IsSetup()); auto run_configuration = RunConfiguration::InferFromSettings(settings); run_configuration.SetEntrypoint("customOnErrorFalse"); shell->RunEngine(std::move(run_configuration), [&](auto result) { ASSERT_EQ(result, Engine::RunStatus::Success); }); message_latch->Wait(); // Flush the UI task runner to make sure errors that were triggered had a turn // to propagate. task_runners.GetUITaskRunner()->PostTask( [&message_latch]() { message_latch->Signal(); }); message_latch->Wait(); ASSERT_EQ(throw_count, 1ul); ASSERT_EQ(ex, "Exception: false") << ex; ASSERT_EQ(st.rfind("#0 customOnErrorFalse", 0), 0ul) << st; DestroyShell(std::move(shell), task_runners); } TEST_F(PlatformConfigurationTest, OnErrorThrows) { auto message_latch = std::make_shared<fml::AutoResetWaitableEvent>(); std::vector<std::string> errors; size_t throw_count = 0; auto finish = [message_latch](Dart_NativeArguments args) { message_latch->Signal(); }; AddNativeCallback("Finish", CREATE_NATIVE_ENTRY(finish)); Settings settings = CreateSettingsForFixture(); settings.unhandled_exception_callback = [&errors, &throw_count](const std::string& exception, const std::string& stack_trace) -> bool { throw_count += 1; errors.push_back(exception); errors.push_back(stack_trace); return true; }; TaskRunners task_runners("test", // label GetCurrentTaskRunner(), // platform CreateNewThread(), // raster CreateNewThread(), // ui CreateNewThread() // io ); std::unique_ptr<Shell> shell = CreateShell(settings, task_runners); ASSERT_TRUE(shell->IsSetup()); auto run_configuration = RunConfiguration::InferFromSettings(settings); run_configuration.SetEntrypoint("customOnErrorThrow"); shell->RunEngine(std::move(run_configuration), [&](auto result) { ASSERT_EQ(result, Engine::RunStatus::Success); }); message_latch->Wait(); // Flush the UI task runner to make sure errors that were triggered had a turn // to propagate. 
task_runners.GetUITaskRunner()->PostTask( [&message_latch]() { message_latch->Signal(); }); message_latch->Wait(); ASSERT_EQ(throw_count, 2ul); ASSERT_EQ(errors.size(), 4ul); ASSERT_EQ(errors[0], "Exception: throw2") << errors[0]; ASSERT_EQ(errors[1].rfind("#0 customOnErrorThrow"), 0ul) << errors[1]; ASSERT_EQ(errors[2], "Exception: throw1") << errors[2]; ASSERT_EQ(errors[3].rfind("#0 customOnErrorThrow"), 0ul) << errors[3]; DestroyShell(std::move(shell), task_runners); } TEST_F(PlatformConfigurationTest, SetDartPerformanceMode) { auto message_latch = std::make_shared<fml::AutoResetWaitableEvent>(); auto finish = [message_latch](Dart_NativeArguments args) { // call needs to happen on the UI thread. Dart_PerformanceMode prev = Dart_SetPerformanceMode(Dart_PerformanceMode_Default); ASSERT_EQ(Dart_PerformanceMode_Latency, prev); message_latch->Signal(); }; AddNativeCallback("Finish", CREATE_NATIVE_ENTRY(finish)); Settings settings = CreateSettingsForFixture(); TaskRunners task_runners("test", // label GetCurrentTaskRunner(), // platform CreateNewThread(), // raster CreateNewThread(), // ui CreateNewThread() // io ); std::unique_ptr<Shell> shell = CreateShell(settings, task_runners); ASSERT_TRUE(shell->IsSetup()); auto run_configuration = RunConfiguration::InferFromSettings(settings); run_configuration.SetEntrypoint("setLatencyPerformanceMode"); shell->RunEngine(std::move(run_configuration), [&](auto result) { ASSERT_EQ(result, Engine::RunStatus::Success); }); message_latch->Wait(); DestroyShell(std::move(shell), task_runners); } } // namespace testing } // namespace flutter
engine/lib/ui/window/platform_configuration_unittests.cc/0
{ "file_path": "engine/lib/ui/window/platform_configuration_unittests.cc", "repo_id": "engine", "token_count": 4803 }
240
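These tests synchronize with the Dart entrypoints through fml::AutoResetWaitableEvent: the registered native callback signals the latch and the test thread blocks on Wait(). A self-contained sketch of that handshake, with a plain std::thread standing in for the Dart side:

#include <thread>

#include "flutter/fml/synchronization/waitable_event.h"

static void LatchHandshakeSketch() {
  fml::AutoResetWaitableEvent latch;
  std::thread worker([&latch]() {
    // Stand-in for the Dart entrypoint hitting the registered native callback.
    latch.Signal();
  });
  latch.Wait();  // Returns only after the worker has signaled.
  worker.join();
}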
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef FLUTTER_LIB_UI_WINDOW_POINTER_DATA_PACKET_H_ #define FLUTTER_LIB_UI_WINDOW_POINTER_DATA_PACKET_H_ #include <cstring> #include <vector> #include "flutter/fml/macros.h" #include "flutter/lib/ui/window/pointer_data.h" namespace flutter { class PointerDataPacket { public: explicit PointerDataPacket(size_t count); PointerDataPacket(uint8_t* data, size_t num_bytes); ~PointerDataPacket(); void SetPointerData(size_t i, const PointerData& data); PointerData GetPointerData(size_t i) const; size_t GetLength() const; const std::vector<uint8_t>& data() const { return data_; } private: std::vector<uint8_t> data_; FML_DISALLOW_COPY_AND_ASSIGN(PointerDataPacket); }; } // namespace flutter #endif // FLUTTER_LIB_UI_WINDOW_POINTER_DATA_PACKET_H_
engine/lib/ui/window/pointer_data_packet.h/0
{ "file_path": "engine/lib/ui/window/pointer_data_packet.h", "repo_id": "engine", "token_count": 353 }
241
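PointerDataPacket stores its PointerData records back to back in a flat byte vector, which implies SetPointerData/GetPointerData copy at a fixed stride. A generic sketch of that kind of indexed record access (ReadRecordSketch is illustrative and not part of the engine API):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

template <typename T>
static T ReadRecordSketch(const std::vector<uint8_t>& data, std::size_t i) {
  T record;
  std::memcpy(&record, data.data() + i * sizeof(T), sizeof(T));
  return record;
}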
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. import 'dart:async'; import 'dart:math' as math; import 'package:image/image.dart'; import 'package:test_api/backend.dart'; /// Provides the environment for a specific web browser. abstract class BrowserEnvironment { /// Name of the browser. Used in logging commands. String get name; /// The [Runtime] used by `package:test` to identify this browser type. Runtime get packageTestRuntime; /// The name of the configuration YAML file used to configure `package:test`. /// /// The configuration file is expected to be a direct child of the `web_ui` /// directory. String get packageTestConfigurationYamlFile; /// Prepares the OS environment to run tests for this browser. /// /// This may include things like installing browsers, and starting web drivers, /// iOS Simulators, and/or Android emulators. /// /// Typically the browser environment is prepared once and supports multiple /// browser instances. Future<void> prepare(); /// Perform any necessary teardown steps Future<void> cleanup(); /// Launches a browser instance. /// /// The browser will be directed to open the provided [url]. /// /// If [debug] is true and the browser supports debugging, launches the /// browser in debug mode by pausing test execution after the code is loaded /// but before calling the `main()` function of the test, giving the /// developer a chance to set breakpoints. Future<Browser> launchBrowserInstance( Uri url, { bool debug = false, }); } /// An interface for running browser instances. /// /// This is intentionally coarse-grained: browsers are controlled primary from /// inside a single tab. Thus this interface only provides support for closing /// the browser and seeing if it closes itself. /// /// Any errors starting or running the browser process are reported through /// [onExit]. abstract class Browser { /// The Dart VM Service URL for this browser. /// /// Returns `null` for browsers that aren't running the Dart VM, or /// if the Dart VM Service URL can't be found. Future<Uri>? get vmServiceUrl => null; /// The remote debugger URL for this browser. /// /// Returns `null` for browsers that don't support remote debugging, /// or if the remote debugging URL can't be found. Future<Uri>? get remoteDebuggerUrl => null; /// A future that completes when the browser exits. /// /// If there's a problem starting or running the browser, this will complete /// with an error. Future<void> get onExit; /// Closes the browser /// /// Returns the same [Future] as [onExit], except that it won't emit /// exceptions. Future<void> close(); /// Returns whether this browser supports taking screenshots bool get supportsScreenshots => false; /// Capture a screenshot. /// /// This will throw if the browser doesn't support screenshotting. /// Please read the details for the implementing classes. Future<Image> captureScreenshot(math.Rectangle<num> region) => throw Exception('This browser does not support screenshots'); }
engine/lib/web_ui/dev/browser.dart/0
{ "file_path": "engine/lib/web_ui/dev/browser.dart", "repo_id": "engine", "token_count": 824 }
242
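A hedged sketch of how a test harness might consume these interfaces: prepare the environment once, launch a Browser at a URL, then wait for it to exit or close it. The helper below is illustrative and not part of this file:

import 'browser.dart';

Future<void> runInBrowserSketch(BrowserEnvironment environment, Uri testUrl) async {
  await environment.prepare();
  final Browser browser = await environment.launchBrowserInstance(testUrl);
  try {
    // Completes when the browser exits; errors starting it surface here too.
    await browser.onExit;
  } finally {
    await browser.close();
    await environment.cleanup();
  }
}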
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. import 'dart:async'; import 'dart:io'; import 'package:path/path.dart' as path; import 'package:test_api/backend.dart'; // TODO(ditman): Fix ignore when https://github.com/flutter/flutter/issues/143599 is resolved. import 'package:test_core/src/util/io.dart'; // ignore: implementation_imports import 'browser.dart'; import 'browser_process.dart'; import 'common.dart'; import 'environment.dart'; import 'firefox_installer.dart'; import 'package_lock.dart'; /// Provides an environment for the desktop Firefox. class FirefoxEnvironment implements BrowserEnvironment { late final BrowserInstallation _installation; @override Future<Browser> launchBrowserInstance(Uri url, {bool debug = false}) async { return Firefox(url, _installation, debug: debug); } @override Runtime get packageTestRuntime => Runtime.firefox; @override Future<void> prepare() async { _installation = await getOrInstallFirefox( packageLock.firefoxLock.version, infoLog: isCi ? stdout : DevNull(), ); } @override Future<void> cleanup() async {} @override final String name = 'Firefox'; @override String get packageTestConfigurationYamlFile => 'dart_test_firefox.yaml'; } /// Runs desktop Firefox. /// /// Most of the communication with the browser is expected to happen via HTTP, /// so this exposes a bare-bones API. The browser starts as soon as the class is /// constructed, and is killed when [close] is called. /// /// Any errors starting or running the process are reported through [onExit]. class Firefox extends Browser { /// Starts a new instance of Firefox open to the given [url], which may be a /// [Uri] or a [String]. factory Firefox(Uri url, BrowserInstallation installation, {bool debug = false}) { final Completer<Uri> remoteDebuggerCompleter = Completer<Uri>.sync(); return Firefox._(BrowserProcess(() async { // Using a profile on opening will prevent popups related to profiles. const String profile = ''' user_pref("browser.shell.checkDefaultBrowser", false); user_pref("dom.disable_open_during_load", false); user_pref("dom.max_script_run_time", 0); '''; final Directory temporaryProfileDirectory = Directory( path.join(environment.webUiDartToolDir.path, 'firefox_profile')); // A good source of various Firefox Command Line options: // https://developer.mozilla.org/en-US/docs/Mozilla/Command_Line_Options#Browser // if (temporaryProfileDirectory.existsSync()) { temporaryProfileDirectory.deleteSync(recursive: true); } temporaryProfileDirectory.createSync(recursive: true); File(path.join(temporaryProfileDirectory.path, 'prefs.js')) .writeAsStringSync(profile); final bool isMac = Platform.isMacOS; final List<String> args = <String>[ url.toString(), '--profile', temporaryProfileDirectory.path, if (!debug) '--headless', '-width $kMaxScreenshotWidth', '-height $kMaxScreenshotHeight', // On Mac Firefox uses the -- option prefix, while elsewhere it uses the - prefix. '${isMac ? '-' : ''}-new-window', '${isMac ? 
'-' : ''}-new-instance', '--start-debugger-server $kDevtoolsPort', ]; final Process process = await Process.start(installation.executable, args); remoteDebuggerCompleter.complete( getRemoteDebuggerUrl(Uri.parse('http://localhost:$kDevtoolsPort'))); unawaited(process.exitCode.then((_) { temporaryProfileDirectory.deleteSync(recursive: true); })); return process; }), remoteDebuggerCompleter.future); } Firefox._(this._process, this.remoteDebuggerUrl); final BrowserProcess _process; @override final Future<Uri> remoteDebuggerUrl; @override Future<void> get onExit => _process.onExit; @override Future<void> close() => _process.close(); }
engine/lib/web_ui/dev/firefox.dart/0
{ "file_path": "engine/lib/web_ui/dev/firefox.dart", "repo_id": "engine", "token_count": 1378 }
243
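The FirefoxEnvironment and Firefox classes above implement the felt tool's BrowserEnvironment and Browser contracts. The following sketch shows how a caller might drive that contract end to end; the runWithFirefox helper is hypothetical, and the relative imports assume the snippet sits next to firefox.dart.

// Hypothetical driver for the BrowserEnvironment contract implemented above.
import 'browser.dart';
import 'firefox.dart';

Future<void> runWithFirefox(Uri testUrl) async {
  final FirefoxEnvironment environment = FirefoxEnvironment();

  // Fetches or locates the Firefox build pinned in the package lock.
  await environment.prepare();

  // Launches the browser at the test URL; startup errors surface via onExit.
  final Browser browser = await environment.launchBrowserInstance(testUrl);
  print('Remote debugger: ${await browser.remoteDebuggerUrl}');

  // Shut the browser down and release any environment resources.
  await browser.close();
  await environment.cleanup();
}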
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. import 'dart:async'; import 'dart:convert' show LineSplitter, utf8; import 'dart:io' as io; import 'package:args/command_runner.dart'; import 'package:meta/meta.dart'; import 'package:path/path.dart' as path; import 'common.dart'; import 'environment.dart'; import 'exceptions.dart'; import 'felt_config.dart'; enum RuntimeMode { debug, profile, release, } class FilePath { FilePath.fromCwd(String relativePath) : _absolutePath = path.absolute(relativePath); FilePath.fromWebUi(String relativePath) : _absolutePath = path.join(environment.webUiRootDir.path, relativePath); FilePath.fromTestSet(TestSet testSet, String relativePath) : _absolutePath = path.join(getTestSetDirectory(testSet).path, relativePath); final String _absolutePath; String get absolute => _absolutePath; String get relativeToCwd => path.relative(_absolutePath); String get relativeToWebUi => path.relative(_absolutePath, from: environment.webUiRootDir.path); @override bool operator ==(Object other) { return other is FilePath && other._absolutePath == _absolutePath; } @override int get hashCode => _absolutePath.hashCode; @override String toString() => _absolutePath; } /// Runs [executable] merging its output into the current process' standard out and standard error. Future<int> runProcess( String executable, List<String> arguments, { String? workingDirectory, bool failureIsSuccess = false, Map<String, String> environment = const <String, String>{}, }) async { final ProcessManager manager = await startProcess( executable, arguments, workingDirectory: workingDirectory, failureIsSuccess: failureIsSuccess, environment: environment, ); return manager.wait(); } /// Runs the process and returns its standard output as a string. /// /// Standard error output is ignored (use [ProcessManager.evalStderr] for that). /// /// Throws an exception if the process exited with a non-zero exit code. Future<String> evalProcess( String executable, List<String> arguments, { String? workingDirectory, Map<String, String> environment = const <String, String>{}, }) async { final ProcessManager manager = await startProcess( executable, arguments, workingDirectory: workingDirectory, environment: environment, evalOutput: true, ); return manager.evalStdout(); } /// Starts a process using the [executable], passing it [arguments]. /// /// Returns a process manager that decorates the process with extra /// functionality. See [ProcessManager] for what it can do. /// /// If [workingDirectory] is not null makes it the current working directory of /// the process. Otherwise, the process inherits this processes working /// directory. /// /// If [failureIsSuccess] is set to true, the returned [ProcessManager] treats /// non-zero exit codes as success, and zero exit code as failure. /// /// If [evalOutput] is set to true, collects and decodes the process' standard /// streams into in-memory strings. Future<ProcessManager> startProcess( String executable, List<String> arguments, { String? workingDirectory, bool failureIsSuccess = false, bool evalOutput = false, Map<String, String> environment = const <String, String>{}, }) async { final io.Process process = await io.Process.start( executable, arguments, workingDirectory: workingDirectory, // Running the process in a system shell for Windows. Otherwise // the process is not able to get Dart from path. 
runInShell: io.Platform.isWindows, // When [evalOutput] is false, we don't need to intercept the stdout of the // sub-process. In this case, it's better to run the sub-process in the // `inheritStdio` mode which lets it print directly to the terminal. // This allows sub-processes such as `ninja` to use all kinds of terminal // features like printing colors, printing progress on the same line, etc. mode: evalOutput ? io.ProcessStartMode.normal : io.ProcessStartMode.inheritStdio, environment: environment, ); processesToCleanUp.add(process); return ProcessManager._( executable: executable, arguments: arguments, workingDirectory: workingDirectory, process: process, evalOutput: evalOutput, failureIsSuccess: failureIsSuccess, ); } /// Manages a process running outside `felt`. class ProcessManager { /// Creates a process manager that manages [process]. ProcessManager._({ required this.executable, required this.arguments, required this.workingDirectory, required this.process, required bool evalOutput, required bool failureIsSuccess, }) : _evalOutput = evalOutput, _failureIsSuccess = failureIsSuccess { if (_evalOutput) { _forwardStream(process.stdout, _stdout); _forwardStream(process.stderr, _stderr); } } /// The executable from which the process was spawned. final String executable; /// The arguments passed to the process. final List<String> arguments; /// The current working directory (CWD) of the child process. /// /// If null, the child process inherits `felt`'s CWD. final String? workingDirectory; /// The process being managed by this manager. final io.Process process; /// Whether the standard output and standard error should be decoded into /// strings while running the process. final bool _evalOutput; /// Whether non-zero exit code is considered successful completion of the /// process. /// /// See also [wait]. final bool _failureIsSuccess; final StringBuffer _stdout = StringBuffer(); final StringBuffer _stderr = StringBuffer(); void _forwardStream(Stream<List<int>> stream, StringSink buffer) { stream .transform(utf8.decoder) .transform(const LineSplitter()) .listen(buffer.writeln); } /// Waits for the [process] to exit. Returns the exit code. /// /// The returned future completes successfully if: /// /// * [failureIsSuccess] is false and the process exited with exit code 0. /// * [failureIsSuccess] is true and the process exited with a non-zero exit code. /// /// In all other circumstances the future completes with an error. Future<int> wait() async { final int exitCode = await process.exitCode; if (!_failureIsSuccess && exitCode != 0) { _throwProcessException( description: 'Sub-process failed.', exitCode: exitCode, ); } return exitCode; } /// If [evalOutput] is true, waits for the process to finish, then returns the /// decoded standard streams. Future<ProcessOutput> eval() async { if (!_evalOutput) { kill(); _throwProcessException( description: 'Cannot eval process output. The process was launched ' 'with `evalOutput` set to false.', ); } final int exitCode = await wait(); return ProcessOutput( exitCode: exitCode, stdout: _stdout.toString(), stderr: _stderr.toString(), ); } /// A convenience method on top of [eval] that only extracts standard output. Future<String> evalStdout() async { return (await eval()).stdout; } /// A convenience method on top of [eval] that only extracts standard error. Future<String> evalStderr() async { return (await eval()).stderr; } Never _throwProcessException({required String description, int?
exitCode}) { throw ProcessException( description: description, executable: executable, arguments: arguments, workingDirectory: workingDirectory, exitCode: exitCode, ); } /// Kills the [process] by sending it the [signal]. bool kill([io.ProcessSignal signal = io.ProcessSignal.sigterm]) { return process.kill(signal); } } /// Stringified standard output and standard error streams from a process. class ProcessOutput { ProcessOutput({ required this.exitCode, required this.stdout, required this.stderr, }); /// The exit code of the process. final int exitCode; /// Standard output of the process decoded as a string. final String stdout; /// Standard error of the process decoded as a string. final String stderr; } /// An exception related to an attempt to spawn a sub-process. @immutable class ProcessException implements Exception { const ProcessException({ required this.description, required this.executable, required this.arguments, required this.workingDirectory, this.exitCode, }); final String description; final String executable; final List<String> arguments; final String? workingDirectory; /// The exit code of the process. /// /// The value is null if the exception is thrown before the process exits. /// For example, this can happen on invalid attempts to start a process, or /// when a process is stuck and is unable to exit. final int? exitCode; @override String toString() { final StringBuffer message = StringBuffer(); message ..writeln(description) ..writeln('Command: $executable ${arguments.join(' ')}') ..writeln('Working directory: ${workingDirectory ?? io.Directory.current.path}'); if (exitCode != null) { message.writeln('Exit code: $exitCode'); } return '$message'; } } /// Adds utility methods for reading command-line arguments to [Command] subclasses. mixin ArgUtils<T> on Command<T> { /// Extracts a boolean argument from [argResults]. bool boolArg(String name) => argResults![name] as bool; /// Extracts a string argument from [argResults]. String stringArg(String name) => argResults![name] as String; RuntimeMode get runtimeMode { final bool isProfile = boolArg('profile'); final bool isDebug = boolArg('debug'); if (isProfile && isDebug) { throw ToolExit('Cannot specify both --profile and --debug at the same time.'); } if (isProfile) { return RuntimeMode.profile; } else if (isDebug) { return RuntimeMode.debug; } else { return RuntimeMode.release; } } } io.Directory getBuildDirectoryForRuntimeMode(RuntimeMode runtimeMode) => switch (runtimeMode) { RuntimeMode.debug => environment.wasmDebugUnoptOutDir, RuntimeMode.profile => environment.wasmProfileOutDir, RuntimeMode.release => environment.wasmReleaseOutDir, }; /// There might be processes started during the tests. /// /// Use this list to store those processes, for cleaning up before shutdown. final List<io.Process> processesToCleanUp = <io.Process>[]; /// There might be temporary directories created during the tests. /// /// Use this list to store those directories and for deleting them before /// shutdown. final List<io.Directory> temporaryDirectories = <io.Directory>[]; typedef AsyncCallback = Future<void> Function(); /// There might be additional cleanup that needs to be done after the tools /// have run. /// /// Add these operations here to make sure that they will run before felt /// exits. final List<AsyncCallback> cleanupCallbacks = <AsyncCallback>[]; /// Cleans up the remaining processes, closes open browsers, and deletes temp files. Future<void> cleanup() async { // Clean up remaining processes, if any.
if (processesToCleanUp.isNotEmpty) { for (final io.Process process in processesToCleanUp) { process.kill(); } } // Delete temporary directories. if (temporaryDirectories.isNotEmpty) { for (final io.Directory directory in temporaryDirectories) { if (directory.existsSync()) { directory.deleteSync(recursive: true); } } } for (final AsyncCallback callback in cleanupCallbacks) { await callback(); } } io.Directory getTestSetDirectory(TestSet testSet) { return io.Directory( path.join( environment.webUiTestDir.path, testSet.directory, ) ); } io.Directory getBundleBuildDirectory(TestBundle bundle) { return io.Directory( path.join( environment.webUiBuildDir.path, 'test_bundles', bundle.name, ) ); } io.Directory getSkiaGoldDirectoryForSuite(TestSuite suite) { return io.Directory( path.join( environment.webUiSkiaGoldDirectory.path, suite.name, ) ); } extension AnsiColors on String { static bool shouldEscape = () { if (isLuci) { // Produce clean output on LUCI. return false; } return io.stdout.hasTerminal && io.stdout.supportsAnsiEscapes; }(); static const String _noColorCode = '\u001b[39m'; String _wrapText(String prefix, String suffix) => shouldEscape ? '$prefix$this$suffix' : this; String _colorText(String colorCode) => _wrapText(colorCode, _noColorCode); String get ansiBlack => _colorText('\u001b[30m'); String get ansiRed => _colorText('\u001b[31m'); String get ansiGreen => _colorText('\u001b[32m'); String get ansiYellow => _colorText('\u001b[33m'); String get ansiBlue => _colorText('\u001b[34m'); String get ansiMagenta => _colorText('\u001b[35m'); String get ansiCyan => _colorText('\u001b[36m'); String get ansiWhite => _colorText('\u001b[37m'); String get ansiBold => _wrapText('\u001b[1m', '\u001b[0m'); }
engine/lib/web_ui/dev/utils.dart/0
{ "file_path": "engine/lib/web_ui/dev/utils.dart", "repo_id": "engine", "token_count": 4132 }
244
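A quick sketch of how the process helpers defined above are meant to be used together: runProcess merges the child's stdio into felt's own output, while evalProcess captures stdout. The ninja and git invocations are placeholders for illustration only.

// Illustrative usage of runProcess/evalProcess; commands are placeholders.
import 'utils.dart';

Future<void> exampleBuildStep() async {
  // Inherits stdio, so colored output and progress lines reach the terminal.
  final int exitCode =
      await runProcess('ninja', <String>['-C', 'out/wasm_release']);
  print('build finished with exit code $exitCode'.ansiGreen);

  // Captures stdout instead of inheriting stdio; throws on a non-zero exit.
  final String revision =
      await evalProcess('git', <String>['rev-parse', 'HEAD']);
  print('engine revision: ${revision.trim()}');
}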
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. part of ui; enum PointerChange { cancel, add, remove, hover, down, move, up, panZoomStart, panZoomUpdate, panZoomEnd, } enum PointerDeviceKind { touch, mouse, stylus, invertedStylus, trackpad, unknown } enum PointerSignalKind { none, scroll, scrollInertiaCancel, scale, unknown } class PointerData { const PointerData({ this.viewId = 0, this.embedderId = 0, this.timeStamp = Duration.zero, this.change = PointerChange.cancel, this.kind = PointerDeviceKind.touch, this.signalKind, this.device = 0, this.pointerIdentifier = 0, this.physicalX = 0.0, this.physicalY = 0.0, this.physicalDeltaX = 0.0, this.physicalDeltaY = 0.0, this.buttons = 0, this.obscured = false, this.synthesized = false, this.pressure = 0.0, this.pressureMin = 0.0, this.pressureMax = 0.0, this.distance = 0.0, this.distanceMax = 0.0, this.size = 0.0, this.radiusMajor = 0.0, this.radiusMinor = 0.0, this.radiusMin = 0.0, this.radiusMax = 0.0, this.orientation = 0.0, this.tilt = 0.0, this.platformData = 0, this.scrollDeltaX = 0.0, this.scrollDeltaY = 0.0, this.panX = 0.0, this.panY = 0.0, this.panDeltaX = 0.0, this.panDeltaY = 0.0, this.scale = 0.0, this.rotation = 0.0, }); final int viewId; final int embedderId; final Duration timeStamp; final PointerChange change; final PointerDeviceKind kind; final PointerSignalKind? signalKind; final int device; final int pointerIdentifier; final double physicalX; final double physicalY; final double physicalDeltaX; final double physicalDeltaY; final int buttons; final bool obscured; final bool synthesized; final double pressure; final double pressureMin; final double pressureMax; final double distance; final double distanceMax; final double size; final double radiusMajor; final double radiusMinor; final double radiusMin; final double radiusMax; final double orientation; final double tilt; final int platformData; final double scrollDeltaX; final double scrollDeltaY; final double panX; final double panY; final double panDeltaX; final double panDeltaY; final double scale; final double rotation; @override String toString() => 'PointerData(viewId: $viewId, x: $physicalX, y: $physicalY)'; String toStringFull() { return '$runtimeType(' 'embedderId: $embedderId, ' 'timeStamp: $timeStamp, ' 'change: $change, ' 'kind: $kind, ' 'signalKind: $signalKind, ' 'device: $device, ' 'pointerIdentifier: $pointerIdentifier, ' 'physicalX: $physicalX, ' 'physicalY: $physicalY, ' 'physicalDeltaX: $physicalDeltaX, ' 'physicalDeltaY: $physicalDeltaY, ' 'buttons: $buttons, ' 'synthesized: $synthesized, ' 'pressure: $pressure, ' 'pressureMin: $pressureMin, ' 'pressureMax: $pressureMax, ' 'distance: $distance, ' 'distanceMax: $distanceMax, ' 'size: $size, ' 'radiusMajor: $radiusMajor, ' 'radiusMinor: $radiusMinor, ' 'radiusMin: $radiusMin, ' 'radiusMax: $radiusMax, ' 'orientation: $orientation, ' 'tilt: $tilt, ' 'platformData: $platformData, ' 'scrollDeltaX: $scrollDeltaX, ' 'scrollDeltaY: $scrollDeltaY, ' 'panX: $panX, ' 'panY: $panY, ' 'panDeltaX: $panDeltaX, ' 'panDeltaY: $panDeltaY, ' 'scale: $scale, ' 'rotation: $rotation, ' 'viewId: $viewId' ')'; } } class PointerDataPacket { const PointerDataPacket({this.data = const <PointerData>[]}); final List<PointerData> data; }
engine/lib/web_ui/lib/pointer.dart/0
{ "file_path": "engine/lib/web_ui/lib/pointer.dart", "repo_id": "engine", "token_count": 1740 }
245
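Purely illustrative: assembling a synthetic pointer-down packet from the data classes above. In the engine these packets are produced by the DOM pointer-event bindings rather than constructed by hand.

import 'package:ui/ui.dart' as ui;

void main() {
  const ui.PointerData down = ui.PointerData(
    change: ui.PointerChange.down,
    kind: ui.PointerDeviceKind.mouse,
    device: 1,
    physicalX: 120.0,
    physicalY: 80.0,
    buttons: 1, // primary button pressed
  );

  const ui.PointerDataPacket packet =
      ui.PointerDataPacket(data: <ui.PointerData>[down]);

  // Prints: PointerData(viewId: 0, x: 120.0, y: 80.0)
  print(packet.data.single);
}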
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. /// Uses image codecs supplied by the CanvasKit WASM bundle. /// /// See also: /// /// * `image_web_codecs.dart`, which uses the `ImageDecoder` supplied by the browser. library image_wasm_codecs; import 'dart:async'; import 'dart:typed_data'; import 'package:ui/src/engine.dart'; import 'package:ui/ui.dart' as ui; /// The CanvasKit implementation of [ui.Codec]. /// /// Wraps `SkAnimatedImage`. class CkAnimatedImage implements ui.Codec { /// Decodes an image from a list of encoded bytes. CkAnimatedImage.decodeFromBytes(this._bytes, this.src, {this.targetWidth, this.targetHeight}) { final SkAnimatedImage skAnimatedImage = createSkAnimatedImage(); _ref = UniqueRef<SkAnimatedImage>(this, skAnimatedImage, 'Codec'); } late final UniqueRef<SkAnimatedImage> _ref; final String src; final Uint8List _bytes; int _frameCount = 0; int _repetitionCount = -1; final int? targetWidth; final int? targetHeight; SkAnimatedImage createSkAnimatedImage() { SkAnimatedImage? animatedImage = canvasKit.MakeAnimatedImageFromEncoded(_bytes); if (animatedImage == null) { throw ImageCodecException( 'Failed to decode image data.\n' 'Image source: $src', ); } if (targetWidth != null || targetHeight != null) { if (animatedImage.getFrameCount() > 1) { printWarning('targetWidth and targetHeight for multi-frame images not supported'); } else { animatedImage = _resizeAnimatedImage(animatedImage, targetWidth, targetHeight); if (animatedImage == null) { throw ImageCodecException( 'Failed to decode re-sized image data.\n' 'Image source: $src', ); } } } _frameCount = animatedImage.getFrameCount().toInt(); _repetitionCount = animatedImage.getRepetitionCount().toInt(); return animatedImage; } SkAnimatedImage? _resizeAnimatedImage(SkAnimatedImage animatedImage, int? targetWidth, int? targetHeight) { final SkImage image = animatedImage.makeImageAtCurrentFrame(); final CkImage ckImage = scaleImage(image, targetWidth, targetHeight); final Uint8List? resizedBytes = ckImage.skImage.encodeToBytes(); if (resizedBytes == null) { throw ImageCodecException('Failed to re-size image'); } final SkAnimatedImage? resizedAnimatedImage = canvasKit.MakeAnimatedImageFromEncoded(resizedBytes); return resizedAnimatedImage; } bool _disposed = false; bool get debugDisposed => _disposed; bool _debugCheckIsNotDisposed() { assert(!_disposed, 'This image has been disposed.'); return true; } @override void dispose() { assert( !_disposed, 'Cannot dispose a codec that has already been disposed.', ); _disposed = true; _ref.dispose(); } @override int get frameCount { assert(_debugCheckIsNotDisposed()); return _frameCount; } @override int get repetitionCount { assert(_debugCheckIsNotDisposed()); return _repetitionCount; } @override Future<ui.FrameInfo> getNextFrame() { assert(_debugCheckIsNotDisposed()); final SkAnimatedImage animatedImage = _ref.nativeObject; // SkAnimatedImage comes pre-initialized to point to the current frame (by // default the first frame, and, with some special resurrection logic in // `createDefault`, to a subsequent frame if resurrection happens in the // middle of animation). Flutter's `Codec` semantics is to initialize to // point to "just before the first frame", i.e. the first invocation of // `getNextFrame` returns the first frame. Therefore, we have to read the // current Skia frame, then advance SkAnimatedImage to the next frame, and // return the current frame. 
final ui.FrameInfo currentFrame = AnimatedImageFrameInfo( Duration(milliseconds: animatedImage.currentFrameDuration().toInt()), CkImage(animatedImage.makeImageAtCurrentFrame()), ); animatedImage.decodeNextFrame(); return Future<ui.FrameInfo>.value(currentFrame); } }
engine/lib/web_ui/lib/src/engine/canvaskit/image_wasm_codecs.dart/0
{ "file_path": "engine/lib/web_ui/lib/src/engine/canvaskit/image_wasm_codecs.dart", "repo_id": "engine", "token_count": 1439 }
246
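A sketch of consuming the ui.Codec contract that CkAnimatedImage implements. The byte source and the 'memory://sample.gif' label are made up, and the package:ui/src/engine.dart barrel import is assumed to export the class; inside the engine the codec is normally created by the CanvasKit image pipeline rather than called like this.

import 'dart:typed_data';

import 'package:ui/src/engine.dart';
import 'package:ui/ui.dart' as ui;

Future<void> playAllFrames(Uint8List encodedBytes) async {
  final ui.Codec codec =
      CkAnimatedImage.decodeFromBytes(encodedBytes, 'memory://sample.gif');

  // Per the semantics documented above, the first getNextFrame() call
  // returns the first frame.
  for (int i = 0; i < codec.frameCount; i++) {
    final ui.FrameInfo frame = await codec.getNextFrame();
    print('frame $i lasts ${frame.duration.inMilliseconds} ms');
    frame.image.dispose();
  }

  codec.dispose();
}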
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. import 'dart:typed_data'; // TODO(hterkelsen): Delete this once the slots change lands? class PlatformMessage { PlatformMessage(this.channel, this.data, this.response); final String channel; final ByteData data; final PlatformMessageResponse response; } class PlatformMessageResponse { void complete(Uint8List data) {} void completeEmpty() {} }
engine/lib/web_ui/lib/src/engine/canvaskit/platform_message.dart/0
{ "file_path": "engine/lib/web_ui/lib/src/engine/canvaskit/platform_message.dart", "repo_id": "engine", "token_count": 145 }
247
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // For member documentation see https://api.flutter.dev/flutter/dart-ui/Canvas-class.html import 'dart:typed_data'; import 'package:ui/ui.dart' as ui; import 'dom.dart'; import 'html/painting.dart'; import 'html/render_vertices.dart'; import 'text/canvas_paragraph.dart'; import 'util.dart'; import 'vector_math.dart'; /// Defines canvas interface common across canvases that the [SceneBuilder] /// renders to. /// /// This can be used either as an interface or super-class. abstract class EngineCanvas { /// The element that is attached to the DOM. DomElement get rootElement; void dispose() { clear(); } void clear(); void save(); void restore(); void translate(double dx, double dy); void scale(double sx, double sy); void rotate(double radians); void skew(double sx, double sy); void transform(Float32List matrix4); void clipRect(ui.Rect rect, ui.ClipOp clipOp); void clipRRect(ui.RRect rrect); void clipPath(ui.Path path); void drawColor(ui.Color color, ui.BlendMode blendMode); void drawLine(ui.Offset p1, ui.Offset p2, SurfacePaintData paint); void drawPaint(SurfacePaintData paint); void drawRect(ui.Rect rect, SurfacePaintData paint); void drawRRect(ui.RRect rrect, SurfacePaintData paint); void drawDRRect(ui.RRect outer, ui.RRect inner, SurfacePaintData paint); void drawOval(ui.Rect rect, SurfacePaintData paint); void drawCircle(ui.Offset c, double radius, SurfacePaintData paint); void drawPath(ui.Path path, SurfacePaintData paint); void drawShadow( ui.Path path, ui.Color color, double elevation, bool transparentOccluder); void drawImage(ui.Image image, ui.Offset p, SurfacePaintData paint); void drawImageRect( ui.Image image, ui.Rect src, ui.Rect dst, SurfacePaintData paint); void drawParagraph(CanvasParagraph paragraph, ui.Offset offset); void drawVertices( SurfaceVertices vertices, ui.BlendMode blendMode, SurfacePaintData paint); void drawPoints(ui.PointMode pointMode, Float32List points, SurfacePaintData paint); /// Extension of Canvas API to mark the end of a stream of painting commands /// to enable re-use/dispose optimizations. void endOfPaint(); } /// Adds an [offset] transformation to a [transform] matrix and returns the /// combined result. /// /// If the given offset is zero, returns [transform] matrix as is. Otherwise, /// returns a new [Matrix4] object representing the combined transformation. Matrix4 transformWithOffset(Matrix4 transform, ui.Offset offset) { if (offset == ui.Offset.zero) { return transform; } // Clone to avoid mutating transform. final Matrix4 effectiveTransform = transform.clone(); effectiveTransform.translate(offset.dx, offset.dy); return effectiveTransform; } class SaveStackEntry { SaveStackEntry({ required this.transform, required this.clipStack, }); final Matrix4 transform; final List<SaveClipEntry>? clipStack; } /// Tagged union of clipping parameters used for canvas. class SaveClipEntry { SaveClipEntry.rect(this.rect, this.currentTransform) : rrect = null, path = null; SaveClipEntry.rrect(this.rrect, this.currentTransform) : rect = null, path = null; SaveClipEntry.path(this.path, this.currentTransform) : rect = null, rrect = null; final ui.Rect? rect; final ui.RRect? rrect; final ui.Path? path; final Matrix4 currentTransform; } /// Provides save stack tracking functionality to implementations of /// [EngineCanvas]. 
mixin SaveStackTracking on EngineCanvas { final List<SaveStackEntry> _saveStack = <SaveStackEntry>[]; /// The stack that maintains clipping operations used when text is painted /// onto bitmap canvas but is composited as separate element. List<SaveClipEntry>? _clipStack; /// Returns whether there are active clipping regions on the canvas. bool get isClipped => _clipStack != null; /// Empties the save stack and the element stack, and resets the transform /// and clip parameters. /// /// Classes that override this method must call `super.clear()`. @override void clear() { _saveStack.clear(); _clipStack = null; _currentTransform = Matrix4.identity(); } /// The current transformation matrix. Matrix4 get currentTransform => _currentTransform; Matrix4 _currentTransform = Matrix4.identity(); /// Saves current clip and transform on the save stack. /// /// Classes that override this method must call `super.save()`. @override void save() { _saveStack.add(SaveStackEntry( transform: _currentTransform.clone(), clipStack: _clipStack == null ? null : List<SaveClipEntry>.from(_clipStack!), )); } /// Restores current clip and transform from the save stack. /// /// Classes that override this method must call `super.restore()`. @override void restore() { if (_saveStack.isEmpty) { return; } final SaveStackEntry entry = _saveStack.removeLast(); _currentTransform = entry.transform; _clipStack = entry.clipStack; } /// Multiplies the [currentTransform] matrix by a translation. /// /// Classes that override this method must call `super.translate()`. @override void translate(double dx, double dy) { _currentTransform.translate(dx, dy); } /// Scales the [currentTransform] matrix. /// /// Classes that override this method must call `super.scale()`. @override void scale(double sx, double sy) { _currentTransform.scale(sx, sy); } /// Rotates the [currentTransform] matrix. /// /// Classes that override this method must call `super.rotate()`. @override void rotate(double radians) { _currentTransform.rotate(kUnitZ, radians); } /// Skews the [currentTransform] matrix. /// /// Classes that override this method must call `super.skew()`. @override void skew(double sx, double sy) { final Matrix4 skewMatrix = Matrix4.identity(); final Float32List storage = skewMatrix.storage; storage[1] = sy; storage[4] = sx; _currentTransform.multiply(skewMatrix); } /// Multiplies the [currentTransform] matrix by another matrix. /// /// Classes that override this method must call `super.transform()`. @override void transform(Float32List matrix4) { _currentTransform.multiply(Matrix4.fromFloat32List(matrix4)); } /// Adds a rectangle to clipping stack. /// /// Classes that override this method must call `super.clipRect()`. @override void clipRect(ui.Rect rect, ui.ClipOp op) { _clipStack ??= <SaveClipEntry>[]; _clipStack!.add(SaveClipEntry.rect(rect, _currentTransform.clone())); } /// Adds a round rectangle to clipping stack. /// /// Classes that override this method must call `super.clipRRect()`. @override void clipRRect(ui.RRect rrect) { _clipStack ??= <SaveClipEntry>[]; _clipStack!.add(SaveClipEntry.rrect(rrect, _currentTransform.clone())); } /// Adds a path to clipping stack. /// /// Classes that override this method must call `super.clipPath()`. @override void clipPath(ui.Path path) { _clipStack ??= <SaveClipEntry>[]; _clipStack!.add(SaveClipEntry.path(path, _currentTransform.clone())); } } DomElement drawParagraphElement( CanvasParagraph paragraph, ui.Offset offset, { Matrix4? 
transform, }) { assert(paragraph.isLaidOut); final DomElement paragraphElement = paragraph.toDomElement(); if (transform != null) { setElementTransform( paragraphElement, transformWithOffset(transform, offset).storage, ); } return paragraphElement; } class _SaveElementStackEntry { _SaveElementStackEntry({ required this.savedElement, required this.transform, }); final DomElement savedElement; final Matrix4 transform; } /// Provides save stack tracking functionality to implementations of /// [EngineCanvas]. mixin SaveElementStackTracking on EngineCanvas { final List<_SaveElementStackEntry> _saveStack = <_SaveElementStackEntry>[]; /// The element at the top of the element stack, or [rootElement] if the stack /// is empty. DomElement get currentElement => _elementStack.isEmpty ? rootElement : _elementStack.last; /// The stack that maintains the DOM elements used to express certain paint /// operations, such as clips. final List<DomElement> _elementStack = <DomElement>[]; /// Pushes the [element] onto the element stack for the purposes of applying /// a paint effect using a DOM element, e.g. for clipping. /// /// The [restore] method automatically pops the element off the stack. void pushElement(DomElement element) { _elementStack.add(element); } /// Empties the save stack and the element stack, and resets the transform /// and clip parameters. /// /// Classes that override this method must call `super.clear()`. @override void clear() { _saveStack.clear(); _elementStack.clear(); _currentTransform = Matrix4.identity(); } /// The current transformation matrix. Matrix4 get currentTransform => _currentTransform; Matrix4 _currentTransform = Matrix4.identity(); /// Saves current clip and transform on the save stack. /// /// Classes that override this method must call `super.save()`. @override void save() { _saveStack.add(_SaveElementStackEntry( savedElement: currentElement, transform: _currentTransform.clone(), )); } /// Restores current clip and transform from the save stack. /// /// Classes that override this method must call `super.restore()`. @override void restore() { if (_saveStack.isEmpty) { return; } final _SaveElementStackEntry entry = _saveStack.removeLast(); _currentTransform = entry.transform; // Pop out of any clips. while (currentElement != entry.savedElement) { _elementStack.removeLast(); } } /// Multiplies the [currentTransform] matrix by a translation. /// /// Classes that override this method must call `super.translate()`. @override void translate(double dx, double dy) { _currentTransform.translate(dx, dy); } /// Scales the [currentTransform] matrix. /// /// Classes that override this method must call `super.scale()`. @override void scale(double sx, double sy) { _currentTransform.scale(sx, sy); } /// Rotates the [currentTransform] matrix. /// /// Classes that override this method must call `super.rotate()`. @override void rotate(double radians) { _currentTransform.rotate(kUnitZ, radians); } /// Skews the [currentTransform] matrix. /// /// Classes that override this method must call `super.skew()`. @override void skew(double sx, double sy) { // DO NOT USE Matrix4.skew(sx, sy)! It treats sx and sy values as radians, // but in our case they are transform matrix values. final Matrix4 skewMatrix = Matrix4.identity(); final Float32List storage = skewMatrix.storage; storage[1] = sy; storage[4] = sx; _currentTransform.multiply(skewMatrix); } /// Multiplies the [currentTransform] matrix by another matrix. /// /// Classes that override this method must call `super.transform()`. 
@override void transform(Float32List matrix4) { _currentTransform.multiply(Matrix4.fromFloat32List(matrix4)); } }
engine/lib/web_ui/lib/src/engine/engine_canvas.dart/0
{ "file_path": "engine/lib/web_ui/lib/src/engine/engine_canvas.dart", "repo_id": "engine", "token_count": 3567 }
248
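The transformWithOffset helper above clones the matrix only when the offset is non-zero. A minimal sketch of that behavior, assuming the package:ui/src/engine.dart barrel exposes Matrix4 and the helper:

import 'package:ui/src/engine.dart';
import 'package:ui/ui.dart' as ui;

void positionChild() {
  final Matrix4 parentTransform = Matrix4.identity()..scale(2.0, 2.0);
  const ui.Offset childOffset = ui.Offset(10.0, 20.0);

  // For Offset.zero the parent matrix is returned as-is; otherwise a clone
  // with the translation appended, leaving the original untouched.
  final Matrix4 effective = transformWithOffset(parentTransform, childOffset);
  print(effective.storage);
}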
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. import 'package:ui/ui.dart' as ui; import '../dom.dart'; import '../util.dart'; import '../vector_math.dart'; import 'surface.dart'; /// A surface that makes its children transparent. class PersistedOpacity extends PersistedContainerSurface implements ui.OpacityEngineLayer { PersistedOpacity(PersistedOpacity? super.oldLayer, this.alpha, this.offset); final int alpha; final ui.Offset offset; @override void recomputeTransformAndClip() { transform = parent!.transform; final double dx = offset.dx; final double dy = offset.dy; if (dx != 0.0 || dy != 0.0) { transform = transform!.clone(); transform!.translate(dx, dy); } projectedClip = null; } /// Cached inverse of transform on this node. Unlike transform, this /// Matrix only contains local transform (not chain multiplied since root). Matrix4? _localTransformInverse; @override Matrix4 get localTransformInverse => _localTransformInverse ??= Matrix4.translationValues(-offset.dx, -offset.dy, 0); @override DomElement createElement() { final DomElement element = domDocument.createElement('flt-opacity'); setElementStyle(element, 'position', 'absolute'); setElementStyle(element, 'transform-origin', '0 0 0'); return element; } @override void apply() { final DomElement element = rootElement!; setElementStyle(element, 'opacity', '${alpha / 255}'); element.style.transform = 'translate(${offset.dx}px, ${offset.dy}px)'; } @override void update(PersistedOpacity oldSurface) { super.update(oldSurface); if (alpha != oldSurface.alpha || offset != oldSurface.offset) { apply(); } } }
engine/lib/web_ui/lib/src/engine/html/opacity.dart/0
{ "file_path": "engine/lib/web_ui/lib/src/engine/html/opacity.dart", "repo_id": "engine", "token_count": 602 }
249
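PersistedOpacity surfaces are normally created by the HTML renderer's scene builder rather than instantiated directly. A hedged sketch of that path, assuming the SurfaceSceneBuilder API of the HTML renderer and using illustrative values:

import 'package:ui/src/engine.dart';
import 'package:ui/ui.dart' as ui;

ui.Scene buildHalfTransparentScene() {
  final SurfaceSceneBuilder builder = SurfaceSceneBuilder();

  // alpha is 0..255, so 128 is roughly 50% opacity; the offset is applied by
  // the surface's apply() as a CSS translate.
  builder.pushOpacity(128, offset: const ui.Offset(5.0, 5.0));
  builder.pop();

  return builder.build();
}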
// Copyright 2013 The Flutter Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. import 'dart:math' as math; import 'dart:typed_data'; import 'package:ui/ui.dart' as ui; import '../browser_detection.dart'; import '../dom.dart'; import '../safe_browser_api.dart'; import '../util.dart'; import '../vector_math.dart'; import 'painting.dart'; import 'shaders/image_shader.dart'; import 'shaders/normalized_gradient.dart'; import 'shaders/shader_builder.dart'; import 'shaders/vertex_shaders.dart'; GlRenderer? glRenderer; class SurfaceVertices implements ui.Vertices { SurfaceVertices( this.mode, List<ui.Offset> positions, { List<ui.Color>? colors, List<int>? indices, }) : colors = colors != null ? _int32ListFromColors(colors) : null, indices = indices != null ? Uint16List.fromList(indices) : null, positions = offsetListToFloat32List(positions) { initWebGl(); } SurfaceVertices.raw( this.mode, this.positions, { this.colors, this.indices, }) { initWebGl(); } final ui.VertexMode mode; final Float32List positions; final Int32List? colors; final Uint16List? indices; static Int32List _int32ListFromColors(List<ui.Color> colors) { final Int32List list = Int32List(colors.length); final int len = colors.length; for (int i = 0; i < len; i++) { list[i] = colors[i].value; } return list; } bool _disposed = false; @override void dispose() { _disposed = true; } @override bool get debugDisposed { bool? result; assert(() { result = _disposed; return true; }()); if (result != null) { return result!; } throw StateError('Vertices.debugDisposed is only available when asserts are enabled.'); } } /// Lazily initializes web gl. /// /// Used to treeshake WebGlRenderer when user doesn't create Vertices object /// to use the API. void initWebGl() { glRenderer ??= _WebGlRenderer(); } abstract class GlRenderer { void drawVertices( DomCanvasRenderingContext2D? context, int canvasWidthInPixels, int canvasHeightInPixels, Matrix4 transform, SurfaceVertices vertices, ui.BlendMode blendMode, SurfacePaintData paint); Object? drawRect(ui.Rect targetRect, GlContext gl, GlProgram glProgram, NormalizedGradient gradient, int widthInPixels, int heightInPixels); String drawRectToImageUrl( ui.Rect targetRect, GlContext gl, GlProgram glProgram, NormalizedGradient gradient, int widthInPixels, int heightInPixels); void drawHairline(DomCanvasRenderingContext2D? ctx, Float32List positions); } /// Treeshakeable backend for rendering webgl on canvas. /// /// This class gets instantiated on demand by Vertices constructor. For apps /// that don't use Vertices WebGlRenderer will be removed from release binary. class _WebGlRenderer implements GlRenderer { @override void drawVertices( DomCanvasRenderingContext2D? context, int canvasWidthInPixels, int canvasHeightInPixels, Matrix4 transform, SurfaceVertices vertices, ui.BlendMode blendMode, SurfacePaintData paint) { // Compute bounds of vertices. final Float32List positions = vertices.positions; final ui.Rect bounds = _computeVerticesBounds(positions, transform); final double minValueX = bounds.left; final double minValueY = bounds.top; final double maxValueX = bounds.right; final double maxValueY = bounds.bottom; double offsetX = 0; double offsetY = 0; int widthInPixels = canvasWidthInPixels; int heightInPixels = canvasHeightInPixels; // If vertices fall outside the bitmap area, cull. 
if (maxValueX < 0 || maxValueY < 0) { return; } if (minValueX > widthInPixels || minValueY > heightInPixels) { return; } // If the vertices cover an area smaller than the hosting canvas, allocate a // minimal offscreen canvas to reduce readPixels data size. if ((maxValueX - minValueX) < widthInPixels && (maxValueY - minValueY) < heightInPixels) { widthInPixels = maxValueX.ceil() - minValueX.floor(); heightInPixels = maxValueY.ceil() - minValueY.floor(); offsetX = minValueX.floor().toDouble(); offsetY = minValueY.floor().toDouble(); } if (widthInPixels == 0 || heightInPixels == 0) { return; } final bool isWebGl2 = webGLVersion == WebGLVersion.webgl2; final EngineImageShader? imageShader = paint.shader == null ? null : paint.shader! as EngineImageShader; final String vertexShader = imageShader == null ? VertexShaders.writeBaseVertexShader() : VertexShaders.writeTextureVertexShader(); final String fragmentShader = imageShader == null ? _writeVerticesFragmentShader() : FragmentShaders.writeTextureFragmentShader( isWebGl2, imageShader.tileModeX, imageShader.tileModeY); final GlContext gl = GlContextCache.createGlContext(widthInPixels, heightInPixels)!; final GlProgram glProgram = gl.cacheProgram(vertexShader, fragmentShader); gl.useProgram(glProgram); final Object positionAttributeLocation = gl.getAttributeLocation(glProgram.program, 'position'); setupVertexTransforms(gl, glProgram, offsetX, offsetY, widthInPixels.toDouble(), heightInPixels.toDouble(), transform); if (imageShader != null) { /// To map from vertex position to texture coordinate in the 0..1 range, /// we set up a scalar to be used in the vertex shader. setupTextureTransform( gl, glProgram, 0.0, 0.0, 1.0 / imageShader.image.width.toDouble(), 1.0 / imageShader.image.height.toDouble()); } // Setup geometry. // // Create buffer for vertex coordinates. final Object positionsBuffer = gl.createBuffer()!; Object? vao; if (imageShader != null) { if (isWebGl2) { // Create a vertex array object. vao = gl.createVertexArray(); // Set vertex array object as active one. gl.bindVertexArray(vao!); } } // Turn on position attribute. gl.enableVertexAttribArray(positionAttributeLocation); // Bind buffer as position buffer and transfer data. gl.bindArrayBuffer(positionsBuffer); bufferVertexData(gl, positions, 1.0); // Setup data format for attribute. vertexAttribPointerGlContext( gl.glContext, positionAttributeLocation, 2, gl.kFloat, false, 0, 0, ); final int vertexCount = positions.length ~/ 2; Object? texture; if (imageShader == null) { // Setup color buffer. final Object? colorsBuffer = gl.createBuffer(); gl.bindArrayBuffer(colorsBuffer); // Buffer kBGRA_8888. if (vertices.colors == null) { final Uint32List vertexColors = Uint32List(vertexCount); for (int i = 0; i < vertexCount; i++) { vertexColors[i] = paint.color; } gl.bufferData(vertexColors, gl.kStaticDraw); } else { gl.bufferData(vertices.colors, gl.kStaticDraw); } final Object colorLoc = gl.getAttributeLocation(glProgram.program, 'color'); vertexAttribPointerGlContext( gl.glContext, colorLoc, 4, gl.kUnsignedByte, true, 0, 0, ); gl.enableVertexAttribArray(colorLoc); } else { // Copy the image to the texture. texture = gl.createTexture(); // Texture units are a global array of references to the textures. // By setting activeTexture, we associate the bound texture to a unit. // Every time we call a texture function such as texImage2D with a target // like TEXTURE_2D, it looks up the texture by using the currently active // unit. // In our case we have a single texture unit 0. 
gl.activeTexture(gl.kTexture0); gl.bindTexture(gl.kTexture2D, texture); gl.texImage2D(gl.kTexture2D, 0, gl.kRGBA, gl.kRGBA, gl.kUnsignedByte, imageShader.image.imgElement); if (isWebGl2) { // Texture REPEAT and MIRROR is only supported in WebGL 2, for // WebGL 1.0 we let shader compute correct uv coordinates. gl.texParameteri(gl.kTexture2D, gl.kTextureWrapS, tileModeToGlWrapping(gl, imageShader.tileModeX)); gl.texParameteri(gl.kTexture2D, gl.kTextureWrapT, tileModeToGlWrapping(gl, imageShader.tileModeY)); // Mipmapping saves your texture in different resolutions // so the graphics card can choose which resolution is optimal // without artifacts. gl.generateMipmap(gl.kTexture2D); } else { // For webgl1, if a texture is not mipmap complete, then the return // value of a texel fetch is (0, 0, 0, 1), so we have to set // minifying function to filter. // See https://www.khronos.org/registry/webgl/specs/1.0.0/#5.13.8. gl.texParameteri(gl.kTexture2D, gl.kTextureWrapS, gl.kClampToEdge); gl.texParameteri(gl.kTexture2D, gl.kTextureWrapT, gl.kClampToEdge); gl.texParameteri(gl.kTexture2D, gl.kTextureMinFilter, gl.kLinear); } } // Finally render triangles. gl.clear(); final Uint16List? indices = vertices.indices; if (indices == null) { gl.drawTriangles(vertexCount, vertices.mode); } else { /// If indices are specified to use shared vertices to reduce vertex /// data transfer, use drawElements to map from vertex indices to /// triangles. final Object? indexBuffer = gl.createBuffer(); gl.bindElementArrayBuffer(indexBuffer); gl.bufferElementData(indices, gl.kStaticDraw); gl.drawElements(gl.kTriangles, indices.length, gl.kUnsignedShort); } if (vao != null) { gl.unbindVertexArray(); } context!.save(); context.resetTransform(); gl.drawImage(context, offsetX, offsetY); context.restore(); } /// Renders a rectangle using given program into an image resource. /// /// Browsers that support OffscreenCanvas and the transferToImageBitmap api /// will return ImageBitmap, otherwise will return CanvasElement. @override Object? drawRect(ui.Rect targetRect, GlContext gl, GlProgram glProgram, NormalizedGradient gradient, int widthInPixels, int heightInPixels) { drawRectToGl( targetRect, gl, glProgram, gradient, widthInPixels, heightInPixels); final Object? image = gl.readPatternData(gradient.isOpaque); gl.bindArrayBuffer(null); gl.bindElementArrayBuffer(null); return image; } /// Renders a rectangle using given program into an image resource and returns /// url. @override String drawRectToImageUrl( ui.Rect targetRect, GlContext gl, GlProgram glProgram, NormalizedGradient gradient, int widthInPixels, int heightInPixels) { drawRectToGl( targetRect, gl, glProgram, gradient, widthInPixels, heightInPixels); final String imageUrl = gl.toImageUrl(); // Cleanup buffers. gl.bindArrayBuffer(null); gl.bindElementArrayBuffer(null); return imageUrl; } /// Renders a rectangle using given program into [GlContext]. /// /// Caller has to cleanup gl array and element array buffers. void drawRectToGl(ui.Rect targetRect, GlContext gl, GlProgram glProgram, NormalizedGradient gradient, int widthInPixels, int heightInPixels) { // Setup rectangle coordinates. final double left = targetRect.left; final double top = targetRect.top; final double right = targetRect.right; final double bottom = targetRect.bottom; // Form 2 triangles for rectangle. 
final Float32List vertices = Float32List(8); vertices[0] = left; vertices[1] = top; vertices[2] = right; vertices[3] = top; vertices[4] = right; vertices[5] = bottom; vertices[6] = left; vertices[7] = bottom; final Object transformUniform = gl.getUniformLocation(glProgram.program, 'u_ctransform'); gl.setUniformMatrix4fv(transformUniform, false, Matrix4.identity().storage); // Set uniform to scale 0..width/height pixels coordinates to -1..1 // clipspace range and flip the Y axis. final Object resolution = gl.getUniformLocation(glProgram.program, 'u_scale'); gl.setUniform4f(resolution, 2.0 / widthInPixels.toDouble(), -2.0 / heightInPixels.toDouble(), 1, 1); final Object shift = gl.getUniformLocation(glProgram.program, 'u_shift'); gl.setUniform4f(shift, -1, 1, 0, 0); // Setup geometry. final Object positionsBuffer = gl.createBuffer()!; gl.bindArrayBuffer(positionsBuffer); gl.bufferData(vertices, gl.kStaticDraw); // Point an attribute to the currently bound vertex buffer object. vertexAttribPointerGlContext( gl.glContext, 0, 2, gl.kFloat, false, 0, 0, ); gl.enableVertexAttribArray(0); // Setup color buffer. final Object? colorsBuffer = gl.createBuffer(); gl.bindArrayBuffer(colorsBuffer); // Buffer kBGRA_8888. final Int32List colors = Int32List.fromList(<int>[ 0xFF00FF00, 0xFF0000FF, 0xFFFFFF00, 0xFF00FFFF, ]); gl.bufferData(colors, gl.kStaticDraw); vertexAttribPointerGlContext( gl.glContext, 1, 4, gl.kUnsignedByte, true, 0, 0, ); gl.enableVertexAttribArray(1); final Object? indexBuffer = gl.createBuffer(); gl.bindElementArrayBuffer(indexBuffer); gl.bufferElementData(VertexShaders.vertexIndicesForRect, gl.kStaticDraw); if (gl.containsUniform(glProgram.program, 'u_resolution')) { final Object uRes = gl.getUniformLocation(glProgram.program, 'u_resolution'); gl.setUniform2f( uRes, widthInPixels.toDouble(), heightInPixels.toDouble()); } gl.clear(); gl.viewport(0, 0, widthInPixels.toDouble(), heightInPixels.toDouble()); gl.drawElements( gl.kTriangles, VertexShaders.vertexIndicesForRect.length, gl.kUnsignedShort); } /// This fragment shader enables Int32List of colors to be passed directly /// to gl context buffer for rendering by decoding RGBA8888. /// #version 300 es /// precision mediump float; /// in vec4 vColor; /// out vec4 fragColor; /// void main() { /// fragColor = vColor; /// } String _writeVerticesFragmentShader() { final ShaderBuilder builder = ShaderBuilder.fragment(webGLVersion); builder.floatPrecision = ShaderPrecision.kMedium; builder.addIn(ShaderType.kVec4, name: 'v_color'); final ShaderMethod method = builder.addMethod('main'); method.addStatement('${builder.fragmentColor.name} = v_color;'); return builder.build(); } @override void drawHairline( DomCanvasRenderingContext2D? 
ctx, Float32List positions) { final int pointCount = positions.length ~/ 2; ctx!.lineWidth = 1.0; ctx.beginPath(); final int len = pointCount * 2; for (int i = 0; i < len;) { for (int triangleVertexIndex = 0; triangleVertexIndex < 3; triangleVertexIndex++, i += 2) { final double dx = positions[i]; final double dy = positions[i + 1]; switch (triangleVertexIndex) { case 0: ctx.moveTo(dx, dy); case 1: ctx.lineTo(dx, dy); case 2: ctx.lineTo(dx, dy); ctx.closePath(); ctx.stroke(); } } } } } ui.Rect _computeVerticesBounds(Float32List positions, Matrix4 transform) { double minValueX, maxValueX, minValueY, maxValueY; minValueX = maxValueX = positions[0]; minValueY = maxValueY = positions[1]; final int len = positions.length; for (int i = 2; i < len; i += 2) { final double x = positions[i]; final double y = positions[i + 1]; if (x.isNaN || y.isNaN) { // Follows skia implementation that sets bounds to empty // and aborts. return ui.Rect.zero; } minValueX = math.min(minValueX, x); maxValueX = math.max(maxValueX, x); minValueY = math.min(minValueY, y); maxValueY = math.max(maxValueY, y); } return _transformBounds( transform, minValueX, minValueY, maxValueX, maxValueY); } ui.Rect _transformBounds( Matrix4 transform, double left, double top, double right, double bottom) { final Float32List storage = transform.storage; final double m0 = storage[0]; final double m1 = storage[1]; final double m4 = storage[4]; final double m5 = storage[5]; final double m12 = storage[12]; final double m13 = storage[13]; final double x0 = (m0 * left) + (m4 * top) + m12; final double y0 = (m1 * left) + (m5 * top) + m13; final double x1 = (m0 * right) + (m4 * top) + m12; final double y1 = (m1 * right) + (m5 * top) + m13; final double x2 = (m0 * right) + (m4 * bottom) + m12; final double y2 = (m1 * right) + (m5 * bottom) + m13; final double x3 = (m0 * left) + (m4 * bottom) + m12; final double y3 = (m1 * left) + (m5 * bottom) + m13; return ui.Rect.fromLTRB( math.min(x0, math.min(x1, math.min(x2, x3))), math.min(y0, math.min(y1, math.min(y2, y3))), math.max(x0, math.max(x1, math.max(x2, x3))), math.max(y0, math.max(y1, math.max(y2, y3)))); } /// Converts from [VertexMode] triangleFan and triangleStrip to triangles. Float32List convertVertexPositions(ui.VertexMode mode, Float32List positions) { assert(mode != ui.VertexMode.triangles); if (mode == ui.VertexMode.triangleFan) { final int coordinateCount = positions.length ~/ 2; final int triangleCount = coordinateCount - 2; final Float32List triangleList = Float32List(triangleCount * 3 * 2); final double centerX = positions[0]; final double centerY = positions[1]; int destIndex = 0; int positionIndex = 2; for (int triangleIndex = 0; triangleIndex < triangleCount; triangleIndex++, positionIndex += 2) { triangleList[destIndex++] = centerX; triangleList[destIndex++] = centerY; triangleList[destIndex++] = positions[positionIndex]; triangleList[destIndex++] = positions[positionIndex + 1]; triangleList[destIndex++] = positions[positionIndex + 2]; triangleList[destIndex++] = positions[positionIndex + 3]; } return triangleList; } else { assert(mode == ui.VertexMode.triangleStrip); // Set of connected triangles. Each triangle shares 2 last vertices. 
final int vertexCount = positions.length ~/ 2; final int triangleCount = vertexCount - 2; double x0 = positions[0]; double y0 = positions[1]; double x1 = positions[2]; double y1 = positions[3]; final Float32List triangleList = Float32List(triangleCount * 3 * 2); int destIndex = 0; for (int i = 0, positionIndex = 4; i < triangleCount; i++) { final double x2 = positions[positionIndex++]; final double y2 = positions[positionIndex++]; triangleList[destIndex++] = x0; triangleList[destIndex++] = y0; triangleList[destIndex++] = x1; triangleList[destIndex++] = y1; triangleList[destIndex++] = x2; triangleList[destIndex++] = y2; x0 = x1; y0 = y1; x1 = x2; y1 = y2; } return triangleList; } }
engine/lib/web_ui/lib/src/engine/html/render_vertices.dart/0
{ "file_path": "engine/lib/web_ui/lib/src/engine/html/render_vertices.dart", "repo_id": "engine", "token_count": 7563 }
250
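To make the vertex plumbing above concrete, here is an illustrative construction of a triangle-fan SurfaceVertices followed by the fan-to-triangle-list conversion performed by convertVertexPositions. The coordinates and colors are arbitrary, and the package:ui/src/engine.dart barrel import is assumed to expose both symbols.

import 'dart:typed_data';

import 'package:ui/src/engine.dart';
import 'package:ui/ui.dart' as ui;

void main() {
  final SurfaceVertices fan = SurfaceVertices(
    ui.VertexMode.triangleFan,
    const <ui.Offset>[
      ui.Offset(0, 0), // fan center
      ui.Offset(10, 0),
      ui.Offset(10, 10),
      ui.Offset(0, 10),
    ],
    colors: const <ui.Color>[
      ui.Color(0xFFFF0000),
      ui.Color(0xFF00FF00),
      ui.Color(0xFF0000FF),
      ui.Color(0xFFFFFFFF),
    ],
  );

  // 4 fan coordinates -> 2 triangles -> 6 vertices -> 12 floats.
  final Float32List triangles =
      convertVertexPositions(ui.VertexMode.triangleFan, fan.positions);
  assert(triangles.length == 12);
  print(triangles);
}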