--- a/gfx/webrender/examples/blob.rs
+++ b/gfx/webrender/examples/blob.rs
@@ -46,27 +46,27 @@ fn render_blob(
commands: Arc<ImageRenderingCommands>,
descriptor: &api::BlobImageDescriptor,
tile: Option<api::TileOffset>,
) -> api::BlobImageResult {
let color = *commands;
// Allocate storage for the result. Right now the resource cache expects the
// tiles to have have no stride or offset.
- let mut texels = Vec::with_capacity((descriptor.width * descriptor.height * 4) as usize);
+ let mut texels = Vec::with_capacity((descriptor.size.width * descriptor.size.height * 4) as usize);
// Generate a per-tile pattern to see it in the demo. For a real use case it would not
// make sense for the rendered content to depend on its tile.
let tile_checker = match tile {
Some(tile) => (tile.x % 2 == 0) != (tile.y % 2 == 0),
None => true,
};
- for y in 0 .. descriptor.height {
- for x in 0 .. descriptor.width {
+ for y in 0 .. descriptor.size.height {
+ for x in 0 .. descriptor.size.width {
// Apply the tile's offset. This is important: all drawing commands should be
// translated by this offset to give correct results with tiled blob images.
let x2 = x + descriptor.offset.x as u32;
let y2 = y + descriptor.offset.y as u32;
// Render a simple checkerboard pattern
let checker = if (x2 % 20 >= 10) != (y2 % 20 >= 10) {
1
@@ -92,18 +92,17 @@ fn render_blob(
));
}
}
}
}
Ok(api::RasterizedBlobImage {
data: texels,
- width: descriptor.width,
- height: descriptor.height,
+ size: descriptor.size,
})
}
struct CheckerboardRenderer {
// We are going to defer the rendering work to worker threads.
// Using a pre-built Arc<ThreadPool> rather than creating our own threads
// makes it possible to share the same thread pool as the glyph renderer (if we
// want to).
@@ -133,22 +132,22 @@ impl CheckerboardRenderer {
workers,
tx,
rx,
}
}
}
impl api::BlobImageRenderer for CheckerboardRenderer {
- fn add(&mut self, key: api::ImageKey, cmds: api::BlobImageData, _: Option<api::TileSize>) {
+ fn add(&mut self, key: api::ImageKey, cmds: Arc<api::BlobImageData>, _: Option<api::TileSize>) {
self.image_cmds
.insert(key, Arc::new(deserialize_blob(&cmds[..]).unwrap()));
}
- fn update(&mut self, key: api::ImageKey, cmds: api::BlobImageData, _dirty_rect: Option<api::DeviceUintRect>) {
+ fn update(&mut self, key: api::ImageKey, cmds: Arc<api::BlobImageData>, _dirty_rect: Option<api::DeviceUintRect>) {
// Here, updating is just replacing the current version of the commands with
// the new one (no incremental updates).
self.image_cmds
.insert(key, Arc::new(deserialize_blob(&cmds[..]).unwrap()));
}
fn delete(&mut self, key: api::ImageKey) {
self.image_cmds.remove(&key);
--- a/gfx/webrender/examples/iframe.rs
+++ b/gfx/webrender/examples/iframe.rs
@@ -69,17 +69,17 @@ impl Example for App {
TransformStyle::Flat,
None,
MixBlendMode::Normal,
Vec::new(),
GlyphRasterSpace::Screen,
);
// red rect under the iframe: if this is visible, things have gone wrong
builder.push_rect(&info, ColorF::new(1.0, 0.0, 0.0, 1.0));
- builder.push_iframe(&info, sub_pipeline_id);
+ builder.push_iframe(&info, sub_pipeline_id, false);
builder.pop_stacking_context();
}
}
fn main() {
let mut app = App {};
boilerplate::main_wrapper(&mut app, None);
}
--- a/gfx/webrender/res/brush.glsl
+++ b/gfx/webrender/res/brush.glsl
@@ -19,16 +19,24 @@ void brush_vs(
#define VECS_PER_BRUSH_PRIM 2
#define VECS_PER_SEGMENT 2
#define BRUSH_FLAG_PERSPECTIVE_INTERPOLATION 1
#define BRUSH_FLAG_SEGMENT_RELATIVE 2
#define BRUSH_FLAG_SEGMENT_REPEAT_X 4
#define BRUSH_FLAG_SEGMENT_REPEAT_Y 8
+//Note: these have to match `gpu_types` constants
+#define INT_BITS (31)
+#define CLIP_CHAIN_RECT_BITS (22)
+#define SEGMENT_BITS (INT_BITS - CLIP_CHAIN_RECT_BITS)
+#define EDGE_FLAG_BITS (4)
+#define BRUSH_FLAG_BITS (4)
+#define CLIP_SCROLL_INDEX_BITS (INT_BITS - EDGE_FLAG_BITS - BRUSH_FLAG_BITS)
+
struct BrushInstance {
int picture_address;
int prim_address;
int clip_chain_rect_index;
int scroll_node_id;
int clip_address;
int z;
int segment_index;
@@ -38,22 +46,22 @@ struct BrushInstance {
};
BrushInstance load_brush() {
BrushInstance bi;
bi.picture_address = aData0.x & 0xffff;
bi.clip_address = aData0.x >> 16;
bi.prim_address = aData0.y;
- bi.clip_chain_rect_index = aData0.z >> 16;
- bi.scroll_node_id = aData0.z & 0xffff;
+ bi.clip_chain_rect_index = aData0.z & ((1 << CLIP_CHAIN_RECT_BITS) - 1);
+ bi.segment_index = aData0.z >> CLIP_CHAIN_RECT_BITS;
bi.z = aData0.w;
- bi.segment_index = aData1.x & 0xffff;
- bi.edge_mask = (aData1.x >> 16) & 0xff;
- bi.flags = (aData1.x >> 24);
+ bi.scroll_node_id = aData1.x & ((1 << CLIP_SCROLL_INDEX_BITS) - 1);
+ bi.edge_mask = (aData1.x >> CLIP_SCROLL_INDEX_BITS) & 0xf;
+ bi.flags = (aData1.x >> (CLIP_SCROLL_INDEX_BITS + EDGE_FLAG_BITS)) & 0xf;
bi.user_data = aData1.yzw;
return bi;
}
struct BrushPrimitive {
RectWithSize local_rect;
RectWithSize local_clip_rect;
--- a/gfx/webrender/res/prim_shared.glsl
+++ b/gfx/webrender/res/prim_shared.glsl
@@ -114,20 +114,20 @@ struct PrimitiveInstance {
int user_data2;
};
PrimitiveInstance fetch_prim_instance() {
PrimitiveInstance pi;
pi.prim_address = aData0.x;
pi.specific_prim_address = pi.prim_address + VECS_PER_PRIM_HEADER;
- pi.render_task_index = aData0.y;
- pi.clip_task_index = aData0.z;
- pi.clip_chain_rect_index = aData0.w / 65536;
- pi.scroll_node_id = aData0.w % 65536;
+ pi.render_task_index = aData0.y % 0x10000;
+ pi.clip_task_index = aData0.y / 0x10000;
+ pi.clip_chain_rect_index = aData0.z;
+ pi.scroll_node_id = aData0.w;
pi.z = aData1.x;
pi.user_data0 = aData1.y;
pi.user_data1 = aData1.z;
pi.user_data2 = aData1.w;
return pi;
}
--- a/gfx/webrender/src/batch.rs
+++ b/gfx/webrender/src/batch.rs
@@ -1,14 +1,14 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{AlphaType, ClipMode, DeviceIntRect, DeviceIntSize};
-use api::{DeviceUintRect, DeviceUintPoint, DeviceUintSize, ExternalImageType, FilterOp, ImageRendering, LayoutRect};
+use api::{DeviceUintRect, DeviceUintPoint, ExternalImageType, FilterOp, ImageRendering, LayoutRect};
use api::{DeviceIntPoint, SubpixelDirection, YuvColorSpace, YuvFormat};
use api::{LayoutToWorldTransform, WorldPixel};
use border::{BorderCornerInstance, BorderCornerSide, BorderEdgeKind};
use clip::{ClipSource, ClipStore, ClipWorkItem};
use clip_scroll_tree::{CoordinateSystemId};
use euclid::{TypedTransform3D, vec3};
use glyph_rasterizer::GlyphFormat;
use gpu_cache::{GpuCache, GpuCacheAddress};
@@ -1750,20 +1750,17 @@ pub fn resolve_image(
// the deferred resolves list to be patched by
// the render thread...
let cache_handle = gpu_cache.push_deferred_per_frame_blocks(BLOCKS_PER_UV_RECT);
let cache_item = CacheItem {
texture_id: SourceTexture::External(external_image),
uv_rect_handle: cache_handle,
uv_rect: DeviceUintRect::new(
DeviceUintPoint::zero(),
- DeviceUintSize::new(
- image_properties.descriptor.width,
- image_properties.descriptor.height,
- )
+ image_properties.descriptor.size,
),
texture_layer: 0,
};
deferred_resolves.push(DeferredResolve {
image_properties,
address: gpu_cache.get_address(&cache_handle),
});
--- a/gfx/webrender/src/clip_scroll_node.rs
+++ b/gfx/webrender/src/clip_scroll_node.rs
@@ -211,19 +211,21 @@ impl ClipScrollNode {
self.children.push(child);
}
pub fn apply_old_scrolling_state(&mut self, old_scrolling_state: &ScrollFrameInfo) {
match self.node_type {
NodeType::ScrollFrame(ref mut scrolling) => {
let scroll_sensitivity = scrolling.scroll_sensitivity;
let scrollable_size = scrolling.scrollable_size;
+ let viewport_rect = scrolling.viewport_rect;
*scrolling = *old_scrolling_state;
scrolling.scroll_sensitivity = scroll_sensitivity;
scrolling.scrollable_size = scrollable_size;
+ scrolling.viewport_rect = viewport_rect;
}
_ if old_scrolling_state.offset != LayoutVector2D::zero() => {
warn!("Tried to scroll a non-scroll node.")
}
_ => {}
}
}
--- a/gfx/webrender/src/device.rs
+++ b/gfx/webrender/src/device.rs
@@ -1600,18 +1600,18 @@ impl Device {
}
}
#[cfg(any(feature = "debug_renderer", feature = "capture"))]
pub fn read_pixels(&mut self, img_desc: &ImageDescriptor) -> Vec<u8> {
let desc = self.gl_describe_format(img_desc.format);
self.gl.read_pixels(
0, 0,
- img_desc.width as i32,
- img_desc.height as i32,
+ img_desc.size.width as i32,
+ img_desc.size.height as i32,
desc.external,
desc.pixel_type,
)
}
/// Read rectangle of pixels into the specified output slice.
pub fn read_pixels_into(
&mut self,
--- a/gfx/webrender/src/display_list_flattener.rs
+++ b/gfx/webrender/src/display_list_flattener.rs
@@ -551,18 +551,17 @@ impl<'a> DisplayListFlattener<'a> {
info: &IframeDisplayItem,
clip_and_scroll_ids: &ClipAndScrollInfo,
reference_frame_relative_offset: &LayoutVector2D,
) {
let iframe_pipeline_id = info.pipeline_id;
let pipeline = match self.scene.pipelines.get(&iframe_pipeline_id) {
Some(pipeline) => pipeline,
None => {
- //TODO: assert/debug_assert?
- error!("Unknown pipeline used for iframe {:?}", info);
+ debug_assert!(info.ignore_missing_pipeline);
return
},
};
self.id_to_index_mapper.initialize_for_pipeline(pipeline);
self.add_clip_node(
info.clip_id,
--- a/gfx/webrender/src/glyph_rasterizer/no_pathfinder.rs
+++ b/gfx/webrender/src/glyph_rasterizer/no_pathfinder.rs
@@ -2,16 +2,17 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Module only available when pathfinder is deactivated when webrender is
//! compiled regularly (i.e. any configuration without feature = "pathfinder")
use api::{GlyphKey, ImageData, ImageDescriptor, ImageFormat};
use device::TextureFilter;
+use euclid::size2;
use gpu_types::UvRectKind;
use rayon::prelude::*;
use std::sync::{Arc, MutexGuard};
use platform::font::FontContext;
use glyph_rasterizer::{FontInstance, FontContexts, GlyphRasterizer, GlyphRasterJob, GlyphRasterJobs, GlyphRasterResult};
use glyph_cache::{GlyphCache, CachedGlyphInfo, GlyphCacheEntry};
use texture_cache::{TextureCache, TextureCacheHandle};
use gpu_cache::GpuCache;
@@ -167,18 +168,17 @@ impl GlyphRasterizer {
}
GlyphRasterResult::Bitmap(glyph) => {
assert_eq!((glyph.left.fract(), glyph.top.fract()), (0.0, 0.0));
let mut texture_cache_handle = TextureCacheHandle::new();
texture_cache.request(&texture_cache_handle, gpu_cache);
texture_cache.update(
&mut texture_cache_handle,
ImageDescriptor {
- width: glyph.width,
- height: glyph.height,
+ size: size2(glyph.width, glyph.height),
stride: None,
format: ImageFormat::BGRA8,
is_opaque: false,
allow_mipmaps: false,
offset: 0,
},
TextureFilter::Linear,
Some(ImageData::Raw(Arc::new(glyph.bytes))),
--- a/gfx/webrender/src/gpu_types.rs
+++ b/gfx/webrender/src/gpu_types.rs
@@ -1,29 +1,40 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{DevicePoint, LayoutToWorldTransform, WorldToLayoutTransform};
use gpu_cache::{GpuCacheAddress, GpuDataRequest};
-use prim_store::EdgeAaSegmentMask;
+use prim_store::{VECS_PER_SEGMENT, EdgeAaSegmentMask};
use render_task::RenderTaskAddress;
+use renderer::MAX_VERTEX_TEXTURE_WIDTH;
// Contains type that must exactly match the same structures declared in GLSL.
+const INT_BITS: usize = 31; //TODO: convert to unsigned
+const CLIP_CHAIN_RECT_BITS: usize = 22;
+const SEGMENT_BITS: usize = INT_BITS - CLIP_CHAIN_RECT_BITS;
+// The guard ensures (at compile time) that the designated number of bits covers
+// the maximum supported segment count for the texture width.
+const _SEGMENT_GUARD: usize = (1 << SEGMENT_BITS) * VECS_PER_SEGMENT - MAX_VERTEX_TEXTURE_WIDTH;
+const EDGE_FLAG_BITS: usize = 4;
+const BRUSH_FLAG_BITS: usize = 4;
+const CLIP_SCROLL_INDEX_BITS: usize = INT_BITS - EDGE_FLAG_BITS - BRUSH_FLAG_BITS;
+
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct ZBufferId(i32);
pub struct ZBufferIdGenerator {
next: i32,
}
impl ZBufferIdGenerator {
- pub fn new() -> ZBufferIdGenerator {
+ pub fn new() -> Self {
ZBufferIdGenerator {
next: 0
}
}
pub fn next(&mut self) -> ZBufferId {
let id = ZBufferId(self.next);
self.next += 1;
@@ -128,19 +139,19 @@ impl SimplePrimitiveInstance {
z,
}
}
pub fn build(&self, data0: i32, data1: i32, data2: i32) -> PrimitiveInstance {
PrimitiveInstance {
data: [
self.specific_prim_address.as_int(),
- self.task_address.0 as i32,
- self.clip_task_address.0 as i32,
- ((self.clip_chain_rect_index.0 as i32) << 16) | self.scroll_id.0 as i32,
+ self.task_address.0 as i32 | (self.clip_task_address.0 as i32) << 16,
+ self.clip_chain_rect_index.0 as i32,
+ self.scroll_id.0 as i32,
self.z.0,
data0,
data1,
data2,
],
}
}
}
@@ -231,25 +242,28 @@ pub struct BrushInstance {
pub segment_index: i32,
pub edge_flags: EdgeAaSegmentMask,
pub brush_flags: BrushFlags,
pub user_data: [i32; 3],
}
impl From<BrushInstance> for PrimitiveInstance {
fn from(instance: BrushInstance) -> Self {
+ debug_assert_eq!(0, instance.clip_chain_rect_index.0 >> CLIP_CHAIN_RECT_BITS);
+ debug_assert_eq!(0, instance.scroll_id.0 >> CLIP_SCROLL_INDEX_BITS);
+ debug_assert_eq!(0, instance.segment_index >> SEGMENT_BITS);
PrimitiveInstance {
data: [
instance.picture_address.0 as i32 | (instance.clip_task_address.0 as i32) << 16,
instance.prim_address.as_int(),
- ((instance.clip_chain_rect_index.0 as i32) << 16) | instance.scroll_id.0 as i32,
+ instance.clip_chain_rect_index.0 as i32 | (instance.segment_index << CLIP_CHAIN_RECT_BITS),
instance.z.0,
- instance.segment_index |
- ((instance.edge_flags.bits() as i32) << 16) |
- ((instance.brush_flags.bits() as i32) << 24),
+ instance.scroll_id.0 as i32 |
+ ((instance.edge_flags.bits() as i32) << CLIP_SCROLL_INDEX_BITS) |
+ ((instance.brush_flags.bits() as i32) << (CLIP_SCROLL_INDEX_BITS + EDGE_FLAG_BITS)),
instance.user_data[0],
instance.user_data[1],
instance.user_data[2],
]
}
}
}
--- a/gfx/webrender/src/prim_store.rs
+++ b/gfx/webrender/src/prim_store.rs
@@ -1,14 +1,14 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{AlphaType, BorderRadius, BoxShadowClipMode, BuiltDisplayList, ClipMode, ColorF, ComplexClipRegion};
-use api::{DeviceIntRect, DeviceIntSize, DeviceUintSize, DevicePixelScale, Epoch, ExtendMode, FontRenderMode};
+use api::{DeviceIntRect, DeviceIntSize, DevicePixelScale, Epoch, ExtendMode, FontRenderMode};
use api::{FilterOp, GlyphInstance, GlyphKey, GradientStop, ImageKey, ImageRendering, ItemRange, ItemTag, TileOffset};
use api::{GlyphRasterSpace, LayoutPoint, LayoutRect, LayoutSize, LayoutToWorldTransform, LayoutVector2D};
use api::{PipelineId, PremultipliedColorF, PropertyBinding, Shadow, YuvColorSpace, YuvFormat, DeviceIntSideOffsets};
use border::{BorderCornerInstance, BorderEdgeKind};
use box_shadow::BLUR_SAMPLE_SCALE;
use clip_scroll_tree::{ClipChainIndex, ClipScrollNodeIndex, CoordinateSystemId};
use clip_scroll_node::ClipScrollNode;
use clip::{ClipChain, ClipChainNode, ClipChainNodeIter, ClipChainNodeRef, ClipSource};
@@ -31,16 +31,17 @@ use scene::SceneProperties;
use segment::SegmentBuilder;
use std::{mem, usize};
use std::sync::Arc;
use util::{MatrixHelpers, WorldToLayoutFastTransform, calculate_screen_bounding_rect};
use util::{pack_as_float, recycle_vec};
const MIN_BRUSH_SPLIT_AREA: f32 = 256.0 * 256.0;
+pub const VECS_PER_SEGMENT: usize = 2;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ScrollNodeAndClipChain {
pub scroll_node_id: ClipScrollNodeIndex,
pub clip_chain_index: ClipChainIndex,
}
impl ScrollNodeAndClipChain {
@@ -539,35 +540,16 @@ pub enum ImageSource {
// via a render task.
Cache {
size: DeviceIntSize,
handle: Option<RenderTaskCacheEntryHandle>,
},
}
#[derive(Debug)]
-pub struct ImagePrimitiveCpu {
- pub tile_spacing: LayoutSize,
- pub alpha_type: AlphaType,
- pub stretch_size: LayoutSize,
- pub current_epoch: Epoch,
- pub source: ImageSource,
- pub key: ImageCacheKey,
-}
-
-impl ToGpuBlocks for ImagePrimitiveCpu {
- fn write_gpu_blocks(&self, mut request: GpuDataRequest) {
- request.push([
- self.stretch_size.width, self.stretch_size.height,
- self.tile_spacing.width, self.tile_spacing.height,
- ]);
- }
-}
-
-#[derive(Debug)]
pub struct BorderPrimitiveCpu {
pub corner_instances: [BorderCornerInstance; 4],
pub edges: [BorderEdgeKind; 4],
pub gpu_blocks: [GpuBlockData; 8],
}
impl ToGpuBlocks for BorderPrimitiveCpu {
fn write_gpu_blocks(&self, mut request: GpuDataRequest) {
@@ -1153,44 +1135,41 @@ impl PrimitiveContainer {
}
}
}
pub struct PrimitiveStore {
/// CPU side information only.
pub cpu_brushes: Vec<BrushPrimitive>,
pub cpu_text_runs: Vec<TextRunPrimitiveCpu>,
- pub cpu_images: Vec<ImagePrimitiveCpu>,
pub cpu_metadata: Vec<PrimitiveMetadata>,
pub cpu_borders: Vec<BorderPrimitiveCpu>,
pub pictures: Vec<PicturePrimitive>,
next_picture_id: u64,
}
impl PrimitiveStore {
pub fn new() -> PrimitiveStore {
PrimitiveStore {
cpu_metadata: Vec::new(),
cpu_brushes: Vec::new(),
cpu_text_runs: Vec::new(),
- cpu_images: Vec::new(),
cpu_borders: Vec::new(),
pictures: Vec::new(),
next_picture_id: 0,
}
}
pub fn recycle(self) -> Self {
PrimitiveStore {
cpu_metadata: recycle_vec(self.cpu_metadata),
cpu_brushes: recycle_vec(self.cpu_brushes),
cpu_text_runs: recycle_vec(self.cpu_text_runs),
- cpu_images: recycle_vec(self.cpu_images),
cpu_borders: recycle_vec(self.cpu_borders),
pictures: recycle_vec(self.pictures),
next_picture_id: self.next_picture_id,
}
}
pub fn add_image_picture(
@@ -1488,20 +1467,17 @@ impl PrimitiveStore {
// batching parameters are used.
metadata.opacity.is_opaque =
image_properties.descriptor.is_opaque &&
opacity_binding.current == 1.0;
if *tile_spacing != LayoutSize::zero() && !is_tiled {
*source = ImageSource::Cache {
// Size in device-pixels we need to allocate in render task cache.
- size: DeviceIntSize::new(
- image_properties.descriptor.width as i32,
- image_properties.descriptor.height as i32
- ),
+ size: image_properties.descriptor.size.to_i32(),
handle: None,
};
}
// Work out whether this image is a normal / simple type, or if
// we need to pre-render it to the render task cache.
if let Some(rect) = sub_rect {
// We don't properly support this right now.
@@ -1589,20 +1565,17 @@ impl PrimitiveStore {
ImageSource::Default => {
// Normal images just reference the source texture each frame.
request_source_image = true;
}
}
if let Some(tile_size) = image_properties.tiling {
- let device_image_size = DeviceUintSize::new(
- image_properties.descriptor.width,
- image_properties.descriptor.height,
- );
+ let device_image_size = image_properties.descriptor.size;
// Tighten the clip rect because decomposing the repeated image can
// produce primitives that are partially covering the original image
// rect and we want to clip these extra parts out.
let tight_clip_rect = metadata.local_clip_rect.intersection(&metadata.local_rect).unwrap();
let visible_rect = compute_conservative_visible_rect(
prim_run_context,
@@ -2759,12 +2732,13 @@ fn get_local_clip_rect_for_nodes(
impl<'a> GpuDataRequest<'a> {
// Write the GPU cache data for an individual segment.
fn write_segment(
&mut self,
local_rect: LayoutRect,
extra_data: [f32; 4],
) {
+ let _ = VECS_PER_SEGMENT;
self.push(local_rect);
self.push(extra_data);
}
}
--- a/gfx/webrender/src/renderer.rs
+++ b/gfx/webrender/src/renderer.rs
@@ -1893,17 +1893,17 @@ impl Renderer {
#[cfg(feature = "debugger")]
fn get_screenshot_for_debugger(&mut self) -> String {
use api::ImageDescriptor;
let desc = ImageDescriptor::new(1024, 768, ImageFormat::BGRA8, true, false);
let data = self.device.read_pixels(&desc);
- let screenshot = debug_server::Screenshot::new(desc.width, desc.height, data);
+ let screenshot = debug_server::Screenshot::new(desc.size.width, desc.size.height, data);
serde_json::to_string(&screenshot).unwrap()
}
#[cfg(not(feature = "debugger"))]
fn get_passes_for_debugger(&self) -> String {
// Avoid unused param warning.
let _ = &self.debug_server;
@@ -4406,17 +4406,17 @@ impl Renderer {
let tid = match native_map.entry(plain_ext.data) {
Entry::Occupied(e) => e.get().clone(),
Entry::Vacant(e) => {
//TODO: provide a way to query both the layer count and the filter from external images
let (layer_count, filter) = (1, TextureFilter::Linear);
let plain_tex = PlainTexture {
data: e.key().clone(),
- size: (descriptor.width, descriptor.height, layer_count),
+ size: (descriptor.size.width, descriptor.size.height, layer_count),
format: descriptor.format,
filter,
render_target: None,
};
let mut t = self.device.create_texture(target, plain_tex.format);
Self::load_texture(&mut t, &plain_tex, &root, &mut self.device);
let extex = t.into_external();
self.owned_external_images.insert(key, extex.clone());
--- a/gfx/webrender/src/resource_cache.rs
+++ b/gfx/webrender/src/resource_cache.rs
@@ -1,45 +1,45 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-use api::{AddFont, BlobImageData, BlobImageResources, ResourceUpdate, ResourceUpdates};
+use api::{AddFont, BlobImageResources, ResourceUpdate, ResourceUpdates};
use api::{BlobImageDescriptor, BlobImageError, BlobImageRenderer, BlobImageRequest};
use api::{ClearCache, ColorF, DevicePoint, DeviceUintPoint, DeviceUintRect, DeviceUintSize};
use api::{Epoch, FontInstanceKey, FontKey, FontTemplate};
use api::{ExternalImageData, ExternalImageType};
use api::{FontInstanceOptions, FontInstancePlatformOptions, FontVariation};
use api::{GlyphDimensions, GlyphKey, IdNamespace};
use api::{ImageData, ImageDescriptor, ImageKey, ImageRendering};
use api::{TileOffset, TileSize};
use app_units::Au;
#[cfg(feature = "capture")]
use capture::ExternalCaptureImage;
#[cfg(feature = "replay")]
use capture::PlainExternalImage;
#[cfg(any(feature = "replay", feature = "png"))]
use capture::CaptureConfig;
use device::TextureFilter;
+use euclid::size2;
use glyph_cache::GlyphCache;
#[cfg(not(feature = "pathfinder"))]
use glyph_cache::GlyphCacheEntry;
use glyph_rasterizer::{FontInstance, GlyphFormat, GlyphRasterizer, GlyphRequest};
use gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle};
use gpu_types::UvRectKind;
use internal_types::{FastHashMap, FastHashSet, SourceTexture, TextureUpdateList};
use profiler::{ResourceProfileCounters, TextureCacheProfileCounters};
use render_backend::FrameId;
use render_task::{RenderTaskCache, RenderTaskCacheKey, RenderTaskId};
use render_task::{RenderTaskCacheEntry, RenderTaskCacheEntryHandle, RenderTaskTree};
use std::collections::hash_map::Entry::{self, Occupied, Vacant};
use std::cmp;
use std::fmt::Debug;
use std::hash::Hash;
-use std::mem;
#[cfg(any(feature = "capture", feature = "replay"))]
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use texture_cache::{TextureCache, TextureCacheHandle};
use tiling::SpecialRenderPasses;
const DEFAULT_TILE_SIZE: TileSize = 512;
@@ -144,34 +144,33 @@ struct CachedImageInfo {
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ResourceClassCache<K: Hash + Eq, V, U: Default> {
resources: FastHashMap<K, V>,
pub user_data: U,
}
pub fn intersect_for_tile(
dirty: DeviceUintRect,
- width: u32,
- height: u32,
+ clipped_tile_size: DeviceUintSize,
tile_size: TileSize,
tile_offset: TileOffset,
) -> Option<DeviceUintRect> {
- dirty.intersection(&DeviceUintRect::new(
- DeviceUintPoint::new(
- tile_offset.x as u32 * tile_size as u32,
- tile_offset.y as u32 * tile_size as u32
- ),
- DeviceUintSize::new(width, height),
- )).map(|mut r| {
- // we can't translate by a negative size so do it manually
- r.origin.x -= tile_offset.x as u32 * tile_size as u32;
- r.origin.y -= tile_offset.y as u32 * tile_size as u32;
- r
- })
+ dirty.intersection(&DeviceUintRect::new(
+ DeviceUintPoint::new(
+ tile_offset.x as u32 * tile_size as u32,
+ tile_offset.y as u32 * tile_size as u32
+ ),
+ clipped_tile_size,
+ )).map(|mut r| {
+ // we can't translate by a negative size so do it manually
+ r.origin.x -= tile_offset.x as u32 * tile_size as u32;
+ r.origin.y -= tile_offset.y as u32 * tile_size as u32;
+ r
+ })
}
impl<K, V, U> ResourceClassCache<K, V, U>
where
K: Clone + Hash + Eq + Debug,
U: Default,
{
@@ -329,17 +328,17 @@ impl ResourceCache {
}
}
pub fn max_texture_size(&self) -> u32 {
self.texture_cache.max_texture_size()
}
fn should_tile(limit: u32, descriptor: &ImageDescriptor, data: &ImageData) -> bool {
- let size_check = descriptor.width > limit || descriptor.height > limit;
+ let size_check = descriptor.size.width > limit || descriptor.size.height > limit;
match *data {
ImageData::Raw(_) | ImageData::Blob(_) => size_check,
ImageData::External(info) => {
// External handles already represent existing textures so it does
// not make sense to tile them into smaller ones.
info.image_type == ExternalImageType::Buffer && size_check
}
}
@@ -490,40 +489,42 @@ impl ResourceCache {
let instance_map = self.resources.font_instances.read().unwrap();
instance_map.get(&instance_key).cloned()
}
pub fn add_image_template(
&mut self,
image_key: ImageKey,
descriptor: ImageDescriptor,
- mut data: ImageData,
+ data: ImageData,
mut tiling: Option<TileSize>,
) {
if tiling.is_none() && Self::should_tile(self.max_texture_size(), &descriptor, &data) {
// We aren't going to be able to upload a texture this big, so tile it, even
// if tiling was not requested.
tiling = Some(DEFAULT_TILE_SIZE);
}
- if let ImageData::Blob(ref mut blob) = data {
+ if let ImageData::Blob(ref blob) = data {
self.blob_image_renderer.as_mut().unwrap().add(
image_key,
- mem::replace(blob, BlobImageData::new()),
+ Arc::clone(&blob),
tiling,
);
}
let resource = ImageResource {
descriptor,
data,
epoch: Epoch(0),
tiling,
- dirty_rect: Some(DeviceUintRect::new(DeviceUintPoint::zero(),
- DeviceUintSize::new(descriptor.width, descriptor.height))),
+ dirty_rect: Some(DeviceUintRect::new(
+ DeviceUintPoint::zero(),
+ descriptor.size,
+ )),
};
self.resources.image_templates.insert(image_key, resource);
}
pub fn update_image_template(
&mut self,
image_key: ImageKey,
@@ -541,17 +542,17 @@ impl ResourceCache {
if tiling.is_none() && Self::should_tile(max_texture_size, &descriptor, &data) {
tiling = Some(DEFAULT_TILE_SIZE);
}
if let ImageData::Blob(ref mut blob) = data {
self.blob_image_renderer
.as_mut()
.unwrap()
- .update(image_key, mem::replace(blob, BlobImageData::new()), dirty_rect);
+ .update(image_key, Arc::clone(&blob), dirty_rect);
}
*image = ImageResource {
descriptor,
data,
epoch: Epoch(image.epoch.0 + 1),
tiling,
dirty_rect: match (dirty_rect, image.dirty_rect) {
@@ -596,22 +597,22 @@ impl ResourceCache {
};
// Images that don't use the texture cache can early out.
if !template.data.uses_texture_cache() {
return;
}
let side_size =
- template.tiling.map_or(cmp::max(template.descriptor.width, template.descriptor.height),
+ template.tiling.map_or(cmp::max(template.descriptor.size.width, template.descriptor.size.height),
|tile_size| tile_size as u32);
if side_size > self.texture_cache.max_texture_size() {
// The image or tiling size is too big for hardware texture size.
warn!("Dropping image, image:(w:{},h:{}, tile:{}) is too big for hardware!",
- template.descriptor.width, template.descriptor.height, template.tiling.unwrap_or(0));
+ template.descriptor.size.width, template.descriptor.size.height, template.tiling.unwrap_or(0));
self.cached_images.insert(request, Err(ImageCacheError::OverLimitSize));
return;
}
// If this image exists in the texture cache, *and* the epoch
// in the cache matches that of the template, then it is
// valid to use as-is.
let (entry, needs_update) = match self.cached_images.entry(request) {
@@ -639,51 +640,46 @@ impl ResourceCache {
return;
}
// We can start a worker thread rasterizing right now, if:
// - The image is a blob.
// - The blob hasn't already been requested this frame.
if self.pending_image_requests.insert(request) && template.data.is_blob() {
if let Some(ref mut renderer) = self.blob_image_renderer {
- let (offset, w, h) = match template.tiling {
+ let (offset, size) = match template.tiling {
Some(tile_size) => {
let tile_offset = request.tile.unwrap();
- let (w, h) = compute_tile_size(
+ let actual_size = compute_tile_size(
&template.descriptor,
tile_size,
tile_offset,
);
let offset = DevicePoint::new(
tile_offset.x as f32 * tile_size as f32,
tile_offset.y as f32 * tile_size as f32,
);
if let Some(dirty) = template.dirty_rect {
- if intersect_for_tile(dirty, w, h, tile_size, tile_offset).is_none() {
+ if intersect_for_tile(dirty, actual_size, tile_size, tile_offset).is_none() {
// don't bother requesting unchanged tiles
return
}
}
- (offset, w, h)
+ (offset, actual_size)
}
- None => (
- DevicePoint::zero(),
- template.descriptor.width,
- template.descriptor.height,
- ),
+ None => (DevicePoint::zero(), template.descriptor.size),
};
renderer.request(
&self.resources,
request.into(),
&BlobImageDescriptor {
- width: w,
- height: h,
+ size,
offset,
format: template.descriptor.format,
},
template.dirty_rect,
);
}
}
}
@@ -965,21 +961,20 @@ impl ResourceCache {
}
}
};
let descriptor = if let Some(tile) = request.tile {
let tile_size = image_template.tiling.unwrap();
let image_descriptor = &image_template.descriptor;
- let (actual_width, actual_height) =
- compute_tile_size(image_descriptor, tile_size, tile);
+ let clipped_tile_size = compute_tile_size(image_descriptor, tile_size, tile);
if let Some(dirty) = dirty_rect {
- dirty_rect = intersect_for_tile(dirty, actual_width, actual_height, tile_size, tile);
+ dirty_rect = intersect_for_tile(dirty, clipped_tile_size, tile_size, tile);
if dirty_rect.is_none() {
continue
}
}
// The tiled image could be stored on the CPU as one large image or be
// already broken up into tiles. This affects the way we compute the stride
// and offset.
@@ -992,18 +987,17 @@ impl ResourceCache {
let stride = image_descriptor.compute_stride();
let offset = image_descriptor.offset +
tile.y as u32 * tile_size as u32 * stride +
tile.x as u32 * tile_size as u32 * bpp;
(Some(stride), offset)
};
ImageDescriptor {
- width: actual_width,
- height: actual_height,
+ size: clipped_tile_size,
stride,
offset,
..*image_descriptor
}
} else {
image_template.descriptor.clone()
};
@@ -1015,18 +1009,18 @@ impl ResourceCache {
// If the texture uses linear filtering, enable mipmaps and
// trilinear filtering, for better image quality. We only
// support this for now on textures that are not placed
// into the shared cache. This accounts for any image
// that is > 512 in either dimension, so it should cover
// the most important use cases. We may want to support
// mip-maps on shared cache items in the future.
if descriptor.allow_mipmaps &&
- descriptor.width > 512 &&
- descriptor.height > 512 &&
+ descriptor.size.width > 512 &&
+ descriptor.size.height > 512 &&
!self.texture_cache.is_allowed_in_shared_cache(
TextureFilter::Linear,
&descriptor,
) {
TextureFilter::Trilinear
} else {
TextureFilter::Linear
}
@@ -1099,34 +1093,34 @@ impl ResourceCache {
}
}
// Compute the width and height of a tile depending on its position in the image.
pub fn compute_tile_size(
descriptor: &ImageDescriptor,
base_size: TileSize,
tile: TileOffset,
-) -> (u32, u32) {
+) -> DeviceUintSize {
let base_size = base_size as u32;
// Most tiles are going to have base_size as width and height,
// except for tiles around the edges that are shrunk to fit the mage data
// (See decompose_tiled_image in frame.rs).
- let actual_width = if (tile.x as u32) < descriptor.width / base_size {
+ let actual_width = if (tile.x as u32) < descriptor.size.width / base_size {
base_size
} else {
- descriptor.width % base_size
+ descriptor.size.width % base_size
};
- let actual_height = if (tile.y as u32) < descriptor.height / base_size {
+ let actual_height = if (tile.y as u32) < descriptor.size.height / base_size {
base_size
} else {
- descriptor.height % base_size
+ descriptor.size.height % base_size
};
- (actual_width, actual_height)
+ size2(actual_width, actual_height)
}
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
enum PlainFontTemplate {
Raw {
data: String,
@@ -1242,17 +1236,17 @@ impl ResourceCache {
let entry = match image_paths.entry(arc.as_ptr()) {
Entry::Occupied(_) => continue,
Entry::Vacant(e) => e,
};
#[cfg(feature = "png")]
CaptureConfig::save_png(
root.join(format!("images/{}.png", image_id)),
- (desc.width, desc.height),
+ (desc.size.width, desc.size.height),
ReadPixelsFormat::Standard(desc.format),
&arc,
);
let file_name = format!("{}.raw", image_id);
let short_path = format!("images/{}", file_name);
fs::File::create(path_images.join(file_name))
.expect(&format!("Unable to create {}", short_path))
.write_all(&*arc)
@@ -1267,33 +1261,32 @@ impl ResourceCache {
// https://github.com/servo/webrender/issues/2236
tile: None,
};
let renderer = self.blob_image_renderer.as_mut().unwrap();
renderer.request(
&self.resources,
request,
&BlobImageDescriptor {
- width: desc.width,
- height: desc.height,
+ size: desc.size,
offset: DevicePoint::zero(),
format: desc.format,
},
None,
);
let result = renderer.resolve(request)
.expect("Blob resolve failed");
- assert_eq!((result.width, result.height), (desc.width, desc.height));
+ assert_eq!(result.size, desc.size);
assert_eq!(result.data.len(), desc.compute_total_size() as usize);
num_blobs += 1;
#[cfg(feature = "png")]
CaptureConfig::save_png(
root.join(format!("blobs/{}.png", num_blobs)),
- (desc.width, desc.height),
+ (desc.size.width, desc.size.height),
ReadPixelsFormat::Standard(desc.format),
&result.data,
);
let file_name = format!("{}.raw", num_blobs);
let short_path = format!("blobs/{}", file_name);
let full_path = path_blobs.clone().join(&file_name);
fs::File::create(full_path)
.expect(&format!("Unable to create {}", short_path))
--- a/gfx/webrender/src/texture_cache.rs
+++ b/gfx/webrender/src/texture_cache.rs
@@ -406,19 +406,17 @@ impl TextureCache {
// is true:
// - Never been in the cache
// - Has been in the cache but was evicted.
// - Exists in the cache but dimensions / format have changed.
let realloc = match handle.entry {
Some(ref handle) => {
match self.entries.get_opt(handle) {
Some(entry) => {
- entry.size.width != descriptor.width ||
- entry.size.height != descriptor.height ||
- entry.format != descriptor.format
+ entry.size != descriptor.size || entry.format != descriptor.format
}
None => {
// Was previously allocated but has been evicted.
true
}
}
}
None => {
@@ -645,18 +643,17 @@ impl TextureCache {
// want to evict everything we can, since that will result in
// more items being uploaded than necessary.
// Instead, we say we will keep evicting until both of these
// conditions are met:
// - We have evicted some arbitrary number of items (512 currently).
// AND
// - We have freed an item that will definitely allow us to
// fit the currently requested allocation.
- let needed_slab_size =
- SlabSize::new(required_alloc.width, required_alloc.height);
+ let needed_slab_size = SlabSize::new(required_alloc.size);
let mut found_matching_slab = false;
let mut freed_complete_page = false;
let mut evicted_items = 0;
for handle in eviction_candidates {
if evicted_items > 512 && (found_matching_slab || freed_complete_page) {
retained_entries.push(handle);
} else {
@@ -749,18 +746,17 @@ impl TextureCache {
self.pending_updates.push(update_op);
texture_array.texture_id = Some(texture_id);
}
// Do the allocation. This can fail and return None
// if there are no free slots or regions available.
texture_array.alloc(
- descriptor.width,
- descriptor.height,
+ descriptor.size,
user_data,
self.frame_id,
uv_rect_kind,
)
}
// Returns true if the given image descriptor *may* be
// placed in the shared texture cache.
@@ -778,18 +774,18 @@ impl TextureCache {
descriptor.format != ImageFormat::BGRA8 {
allowed_in_shared_cache = false;
}
// Anything larger than TEXTURE_REGION_DIMENSIONS goes in a standalone texture.
// TODO(gw): If we find pages that suffer from batch breaks in this
// case, add support for storing these in a standalone
// texture array.
- if descriptor.width > TEXTURE_REGION_DIMENSIONS ||
- descriptor.height > TEXTURE_REGION_DIMENSIONS {
+ if descriptor.size.width > TEXTURE_REGION_DIMENSIONS ||
+ descriptor.size.height > TEXTURE_REGION_DIMENSIONS {
allowed_in_shared_cache = false;
}
allowed_in_shared_cache
}
// Allocate storage for a given image. This attempts to allocate
// from the shared cache, but falls back to standalone texture
@@ -797,26 +793,25 @@ impl TextureCache {
fn allocate(
&mut self,
handle: &mut TextureCacheHandle,
descriptor: ImageDescriptor,
filter: TextureFilter,
user_data: [f32; 3],
uv_rect_kind: UvRectKind,
) {
- assert!(descriptor.width > 0 && descriptor.height > 0);
+ assert!(descriptor.size.width > 0 && descriptor.size.height > 0);
// Work out if this image qualifies to go in the shared (batching) cache.
let allowed_in_shared_cache = self.is_allowed_in_shared_cache(
filter,
&descriptor,
);
let mut allocated_in_shared_cache = true;
let mut new_cache_entry = None;
- let size = DeviceUintSize::new(descriptor.width, descriptor.height);
let frame_id = self.frame_id;
// If it's allowed in the cache, see if there is a spot for it.
if allowed_in_shared_cache {
new_cache_entry = self.allocate_from_shared_cache(
&descriptor,
filter,
user_data,
@@ -843,29 +838,29 @@ impl TextureCache {
if new_cache_entry.is_none() {
let texture_id = self.cache_textures.allocate(descriptor.format);
// Create an update operation to allocate device storage
// of the right size / format.
let update_op = TextureUpdate {
id: texture_id,
op: TextureUpdateOp::Create {
- width: descriptor.width,
- height: descriptor.height,
+ width: descriptor.size.width,
+ height: descriptor.size.height,
format: descriptor.format,
filter,
render_target: Some(RenderTargetInfo { has_depth: false }),
layer_count: 1,
},
};
self.pending_updates.push(update_op);
new_cache_entry = Some(CacheEntry::new_standalone(
texture_id,
- size,
+ descriptor.size,
descriptor.format,
filter,
user_data,
frame_id,
uv_rect_kind,
));
allocated_in_shared_cache = false;
@@ -923,19 +918,19 @@ impl TextureCache {
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Copy, Clone, PartialEq)]
struct SlabSize {
width: u32,
height: u32,
}
impl SlabSize {
- fn new(width: u32, height: u32) -> SlabSize {
- let x_size = quantize_dimension(width);
- let y_size = quantize_dimension(height);
+ fn new(size: DeviceUintSize) -> SlabSize {
+ let x_size = quantize_dimension(size.width);
+ let y_size = quantize_dimension(size.height);
assert!(x_size > 0 && x_size <= TEXTURE_REGION_DIMENSIONS);
assert!(y_size > 0 && y_size <= TEXTURE_REGION_DIMENSIONS);
let (width, height) = match (x_size, y_size) {
// Special cased rectangular slab pages.
(512, 256) => (512, 256),
(512, 128) => (512, 128),
@@ -1104,18 +1099,17 @@ impl TextureArray {
} else {
counter.set(0, 0);
}
}
// Allocate space in this texture array.
fn alloc(
&mut self,
- width: u32,
- height: u32,
+ size: DeviceUintSize,
user_data: [f32; 3],
frame_id: FrameId,
uv_rect_kind: UvRectKind,
) -> Option<CacheEntry> {
// Lazily allocate the regions if not already created.
// This means that very rarely used image formats can be
// added but won't allocate a cache if never used.
if !self.is_allocated {
@@ -1137,17 +1131,17 @@ impl TextureArray {
}
}
}
self.is_allocated = true;
}
// Quantize the size of the allocation to select a region to
// allocate from.
- let slab_size = SlabSize::new(width, height);
+ let slab_size = SlabSize::new(size);
// TODO(gw): For simplicity, the initial implementation just
// has a single vec<> of regions. We could easily
// make this more efficient by storing a list of
// regions for each slab size specifically...
// Keep track of the location of an empty region,
// in case we need to select a new empty region
@@ -1184,17 +1178,17 @@ impl TextureArray {
origin: location,
}
});
}
}
entry_kind.map(|kind| {
CacheEntry {
- size: DeviceUintSize::new(width, height),
+ size,
user_data,
last_access: frame_id,
kind,
uv_rect_handle: GpuCacheHandle::new(),
format: self.format,
filter: self.filter,
texture_id: self.texture_id.unwrap(),
eviction_notice: None,
@@ -1227,18 +1221,18 @@ impl TextureUpdate {
}
ExternalImageType::Buffer => TextureUpdateSource::External {
id: ext_image.id,
channel_index: ext_image.channel_index,
},
},
ImageData::Raw(bytes) => {
let finish = descriptor.offset +
- descriptor.width * descriptor.format.bytes_per_pixel() +
- (descriptor.height - 1) * descriptor.compute_stride();
+ descriptor.size.width * descriptor.format.bytes_per_pixel() +
+ (descriptor.size.height - 1) * descriptor.compute_stride();
assert!(bytes.len() >= finish as usize);
TextureUpdateSource::Bytes { data: bytes }
}
};
let update_op = match dirty_rect {
Some(dirty) => {
--- a/gfx/webrender_api/src/display_item.rs
+++ b/gfx/webrender_api/src/display_item.rs
@@ -538,16 +538,17 @@ pub enum FilterOp {
DropShadow(LayoutVector2D, f32, ColorF),
ColorMatrix([f32; 20]),
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct IframeDisplayItem {
pub clip_id: ClipId,
pub pipeline_id: PipelineId,
+ pub ignore_missing_pipeline: bool,
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct ImageDisplayItem {
pub image_key: ImageKey,
pub stretch_size: LayoutSize,
pub tile_spacing: LayoutSize,
pub image_rendering: ImageRendering,
--- a/gfx/webrender_api/src/display_list.rs
+++ b/gfx/webrender_api/src/display_list.rs
@@ -1486,20 +1486,26 @@ impl DisplayListBuilder {
self.clip_stack.pop();
if let Some(save_state) = self.save_state.as_ref() {
assert!(self.clip_stack.len() >= save_state.clip_stack_len,
"Cannot pop clips that were pushed before the DisplayListBuilder save.");
}
assert!(!self.clip_stack.is_empty());
}
- pub fn push_iframe(&mut self, info: &LayoutPrimitiveInfo, pipeline_id: PipelineId) {
+ pub fn push_iframe(
+ &mut self,
+ info: &LayoutPrimitiveInfo,
+ pipeline_id: PipelineId,
+ ignore_missing_pipeline: bool
+ ) {
let item = SpecificDisplayItem::Iframe(IframeDisplayItem {
clip_id: self.generate_clip_id(),
pipeline_id,
+ ignore_missing_pipeline,
});
self.push_item(item, info);
}
pub fn push_shadow(&mut self, info: &LayoutPrimitiveInfo, shadow: Shadow) {
self.push_item(SpecificDisplayItem::PushShadow(shadow), info);
}
--- a/gfx/webrender_api/src/image.rs
+++ b/gfx/webrender_api/src/image.rs
@@ -1,17 +1,18 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate serde_bytes;
use font::{FontInstanceKey, FontKey, FontTemplate};
use std::sync::Arc;
-use {DevicePoint, DeviceUintRect, IdNamespace, TileOffset, TileSize};
+use {DevicePoint, DeviceUintRect, DeviceUintSize, IdNamespace, TileOffset, TileSize};
+use euclid::size2;
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct ImageKey(pub IdNamespace, pub u32);
impl ImageKey {
pub const DUMMY: Self = ImageKey(IdNamespace(0), 0);
@@ -68,57 +69,54 @@ impl ImageFormat {
ImageFormat::RG8 => 2,
}
}
}
#[derive(Copy, Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct ImageDescriptor {
pub format: ImageFormat,
- pub width: u32,
- pub height: u32,
+ pub size: DeviceUintSize,
pub stride: Option<u32>,
pub offset: u32,
pub is_opaque: bool,
pub allow_mipmaps: bool,
}
impl ImageDescriptor {
pub fn new(
width: u32,
height: u32,
format: ImageFormat,
is_opaque: bool,
allow_mipmaps: bool,
) -> Self {
ImageDescriptor {
- width,
- height,
+ size: size2(width, height),
format,
stride: None,
offset: 0,
is_opaque,
allow_mipmaps,
}
}
pub fn compute_stride(&self) -> u32 {
- self.stride
- .unwrap_or(self.width * self.format.bytes_per_pixel())
+ self.stride.unwrap_or(self.size.width * self.format.bytes_per_pixel())
}
pub fn compute_total_size(&self) -> u32 {
- self.compute_stride() * self.height
+ self.compute_stride() * self.size.height
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum ImageData {
Raw(#[serde(with = "serde_image_data_raw")] Arc<Vec<u8>>),
- Blob(#[serde(with = "serde_bytes")] BlobImageData),
+ Blob(#[serde(with = "serde_image_data_raw")] Arc<BlobImageData>),
External(ExternalImageData),
}
mod serde_image_data_raw {
extern crate serde_bytes;
use std::sync::Arc;
use serde::{Deserializer, Serializer};
@@ -136,18 +134,18 @@ impl ImageData {
pub fn new(bytes: Vec<u8>) -> Self {
ImageData::Raw(Arc::new(bytes))
}
pub fn new_shared(bytes: Arc<Vec<u8>>) -> Self {
ImageData::Raw(bytes)
}
- pub fn new_blob_image(commands: Vec<u8>) -> Self {
- ImageData::Blob(commands)
+ pub fn new_blob_image(commands: BlobImageData) -> Self {
+ ImageData::Blob(Arc::new(commands))
}
#[inline]
pub fn is_blob(&self) -> bool {
match *self {
ImageData::Blob(_) => true,
_ => false,
}
@@ -167,19 +165,19 @@ impl ImageData {
}
pub trait BlobImageResources {
fn get_font_data(&self, key: FontKey) -> &FontTemplate;
fn get_image(&self, key: ImageKey) -> Option<(&ImageData, &ImageDescriptor)>;
}
pub trait BlobImageRenderer: Send {
- fn add(&mut self, key: ImageKey, data: BlobImageData, tiling: Option<TileSize>);
+ fn add(&mut self, key: ImageKey, data: Arc<BlobImageData>, tiling: Option<TileSize>);
- fn update(&mut self, key: ImageKey, data: BlobImageData, dirty_rect: Option<DeviceUintRect>);
+ fn update(&mut self, key: ImageKey, data: Arc<BlobImageData>, dirty_rect: Option<DeviceUintRect>);
fn delete(&mut self, key: ImageKey);
fn request(
&mut self,
services: &BlobImageResources,
key: BlobImageRequest,
descriptor: &BlobImageDescriptor,
@@ -197,25 +195,23 @@ pub trait BlobImageRenderer: Send {
pub type BlobImageData = Vec<u8>;
pub type BlobImageResult = Result<RasterizedBlobImage, BlobImageError>;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct BlobImageDescriptor {
- pub width: u32,
- pub height: u32,
+ pub size: DeviceUintSize,
pub offset: DevicePoint,
pub format: ImageFormat,
}
pub struct RasterizedBlobImage {
- pub width: u32,
- pub height: u32,
+ pub size: DeviceUintSize,
pub data: Vec<u8>,
}
#[derive(Clone, Debug)]
pub enum BlobImageError {
Oom,
InvalidKey,
InvalidData,
--- a/gfx/webrender_bindings/revision.txt
+++ b/gfx/webrender_bindings/revision.txt
@@ -1,1 +1,1 @@
-9d20df4e76e3b19c569fd89965f70a2c278ff0c8
+672f480af48b0ad69c1b2781151278d99816763a
--- a/gfx/wrench/src/blob.rs
+++ b/gfx/wrench/src/blob.rs
@@ -4,16 +4,17 @@
// A very basic BlobImageRenderer that can only render a checkerboard pattern.
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::Mutex;
use webrender::api::*;
use webrender::intersect_for_tile;
+use euclid::size2;
// Serialize/deserialize the blob.
pub fn serialize_blob(color: ColorU) -> Vec<u8> {
vec![color.r, color.g, color.b, color.a]
}
fn deserialize_blob(blob: &[u8]) -> Result<ColorU, ()> {
@@ -36,31 +37,34 @@ fn premul(x: u8, a: u8) -> u8 {
fn render_blob(
color: ColorU,
descriptor: &BlobImageDescriptor,
tile: Option<(TileSize, TileOffset)>,
dirty_rect: Option<DeviceUintRect>,
) -> BlobImageResult {
// Allocate storage for the result. Right now the resource cache expects the
// tiles to have have no stride or offset.
- let mut texels = vec![0u8; (descriptor.width * descriptor.height * descriptor.format.bytes_per_pixel()) as usize];
+ let buf_size = descriptor.size.width *
+ descriptor.size.height *
+ descriptor.format.bytes_per_pixel();
+ let mut texels = vec![0u8; (buf_size) as usize];
// Generate a per-tile pattern to see it in the demo. For a real use case it would not
// make sense for the rendered content to depend on its tile.
let tile_checker = match tile {
Some((_, tile)) => (tile.x % 2 == 0) != (tile.y % 2 == 0),
None => true,
};
let mut dirty_rect = dirty_rect.unwrap_or(DeviceUintRect::new(
DeviceUintPoint::new(0, 0),
- DeviceUintSize::new(descriptor.width, descriptor.height)));
+ DeviceUintSize::new(descriptor.size.width, descriptor.size.height)));
if let Some((tile_size, tile)) = tile {
- dirty_rect = intersect_for_tile(dirty_rect, tile_size as u32, tile_size as u32,
+ dirty_rect = intersect_for_tile(dirty_rect, size2(tile_size as u32, tile_size as u32),
tile_size, tile)
.expect("empty rects should be culled by webrender");
}
for y in dirty_rect.min_y() .. dirty_rect.max_y() {
for x in dirty_rect.min_x() .. dirty_rect.max_x() {
// Apply the tile's offset. This is important: all drawing commands should be
@@ -75,37 +79,36 @@ fn render_blob(
0
};
// ..nested in the per-tile checkerboard pattern
let tc = if tile_checker { 0 } else { (1 - checker) * 40 };
match descriptor.format {
ImageFormat::BGRA8 => {
let a = color.a * checker + tc;
- texels[((y * descriptor.width + x) * 4 + 0) as usize] = premul(color.b * checker + tc, a);
- texels[((y * descriptor.width + x) * 4 + 1) as usize] = premul(color.g * checker + tc, a);
- texels[((y * descriptor.width + x) * 4 + 2) as usize] = premul(color.r * checker + tc, a);
- texels[((y * descriptor.width + x) * 4 + 3) as usize] = a;
+ texels[((y * descriptor.size.width + x) * 4 + 0) as usize] = premul(color.b * checker + tc, a);
+ texels[((y * descriptor.size.width + x) * 4 + 1) as usize] = premul(color.g * checker + tc, a);
+ texels[((y * descriptor.size.width + x) * 4 + 2) as usize] = premul(color.r * checker + tc, a);
+ texels[((y * descriptor.size.width + x) * 4 + 3) as usize] = a;
}
ImageFormat::R8 => {
- texels[(y * descriptor.width + x) as usize] = color.a * checker + tc;
+ texels[(y * descriptor.size.width + x) as usize] = color.a * checker + tc;
}
_ => {
return Err(BlobImageError::Other(
format!("Unsupported image format {:?}", descriptor.format),
));
}
}
}
}
Ok(RasterizedBlobImage {
data: texels,
- width: descriptor.width,
- height: descriptor.height,
+ size: descriptor.size,
})
}
pub struct BlobCallbacks {
pub request: Box<Fn(&BlobImageRequest) + Send + 'static>,
pub resolve: Box<Fn() + Send + 'static>,
}
@@ -129,22 +132,22 @@ impl CheckerboardRenderer {
callbacks,
image_cmds: HashMap::new(),
rendered_images: HashMap::new(),
}
}
}
impl BlobImageRenderer for CheckerboardRenderer {
- fn add(&mut self, key: ImageKey, cmds: BlobImageData, tile_size: Option<TileSize>) {
+ fn add(&mut self, key: ImageKey, cmds: Arc<BlobImageData>, tile_size: Option<TileSize>) {
self.image_cmds
.insert(key, (deserialize_blob(&cmds[..]).unwrap(), tile_size));
}
- fn update(&mut self, key: ImageKey, cmds: BlobImageData, _dirty_rect: Option<DeviceUintRect>) {
+ fn update(&mut self, key: ImageKey, cmds: Arc<BlobImageData>, _dirty_rect: Option<DeviceUintRect>) {
// Here, updating is just replacing the current version of the commands with
// the new one (no incremental updates).
self.image_cmds.get_mut(&key).unwrap().0 = deserialize_blob(&cmds[..]).unwrap();
}
fn delete(&mut self, key: ImageKey) {
self.image_cmds.remove(&key);
}
--- a/gfx/wrench/src/json_frame_writer.rs
+++ b/gfx/wrench/src/json_frame_writer.rs
@@ -111,40 +111,40 @@ impl JsonFrameWriter {
file.write_all(b"\n").unwrap();
}
fn update_resources(&mut self, updates: &ResourceUpdates) {
for update in &updates.updates {
match *update {
ResourceUpdate::AddImage(ref img) => {
let stride = img.descriptor.stride.unwrap_or(
- img.descriptor.width * img.descriptor.format.bytes_per_pixel(),
+ img.descriptor.size.width * img.descriptor.format.bytes_per_pixel(),
);
let bytes = match img.data {
ImageData::Raw(ref v) => (**v).clone(),
ImageData::External(_) | ImageData::Blob(_) => {
return;
}
};
self.images.insert(
img.key,
CachedImage {
- width: img.descriptor.width,
- height: img.descriptor.height,
+ width: img.descriptor.size.width,
+ height: img.descriptor.size.height,
stride,
format: img.descriptor.format,
bytes: Some(bytes),
path: None,
},
);
}
ResourceUpdate::UpdateImage(ref img) => {
if let Some(ref mut data) = self.images.get_mut(&img.key) {
- assert_eq!(data.width, img.descriptor.width);
- assert_eq!(data.height, img.descriptor.height);
+ assert_eq!(data.width, img.descriptor.size.width);
+ assert_eq!(data.height, img.descriptor.size.height);
assert_eq!(data.format, img.descriptor.format);
if let ImageData::Raw(ref bytes) = img.data {
data.path = None;
data.bytes = Some((**bytes).clone());
} else {
// Other existing image types only make sense within the gecko integration.
println!(
--- a/gfx/wrench/src/rawtest.rs
+++ b/gfx/wrench/src/rawtest.rs
@@ -39,16 +39,17 @@ impl<'a> RawtestHarness<'a> {
}
pub fn run(mut self) {
self.test_hit_testing();
self.test_retained_blob_images_test();
self.test_blob_update_test();
self.test_blob_update_epoch_test();
self.test_tile_decomposition();
+ self.test_very_large_blob();
self.test_save_restore();
self.test_capture();
}
fn render_and_get_pixels(&mut self, window_rect: DeviceUintRect) -> Vec<u8> {
self.rx.recv().unwrap();
self.wrench.render();
self.wrench.renderer.read_pixels_rgba8(window_rect)
@@ -58,16 +59,17 @@ impl<'a> RawtestHarness<'a> {
&mut self,
epoch: &mut Epoch,
layout_size: LayoutSize,
builder: DisplayListBuilder,
resources: Option<ResourceUpdates>
) {
let mut txn = Transaction::new();
let root_background_color = Some(ColorF::new(1.0, 1.0, 1.0, 1.0));
+ txn.use_scene_builder_thread();
if let Some(resources) = resources {
txn.update_resources(resources);
}
txn.set_display_list(
*epoch,
root_background_color,
layout_size,
builder.finalize(),
@@ -116,16 +118,99 @@ impl<'a> RawtestHarness<'a> {
// Leaving a tiled blob image in the resource cache
// confuses the `test_capture`. TODO: remove this
resources = ResourceUpdates::new();
resources.delete_image(blob_img);
self.wrench.api.update_resources(resources);
}
+ fn test_very_large_blob(&mut self) {
+ println!("\tvery large blob...");
+
+ assert_eq!(self.wrench.device_pixel_ratio, 1.);
+
+ let window_size = self.window.get_inner_size();
+
+ let test_size = DeviceUintSize::new(800, 800);
+
+ let window_rect = DeviceUintRect::new(
+ DeviceUintPoint::new(0, window_size.height - test_size.height),
+ test_size,
+ );
+
+ // This exposes a crash in tile decomposition
+ let layout_size = LayoutSize::new(800., 800.);
+ let mut resources = ResourceUpdates::new();
+
+ let blob_img = self.wrench.api.generate_image_key();
+ resources.add_image(
+ blob_img,
+ ImageDescriptor::new(1510, 111256, ImageFormat::BGRA8, false, false),
+ ImageData::new_blob_image(blob::serialize_blob(ColorU::new(50, 50, 150, 255))),
+ Some(31),
+ );
+
+ let mut builder = DisplayListBuilder::new(self.wrench.root_pipeline_id, layout_size);
+
+ let info = LayoutPrimitiveInfo::new(rect(0., -9600.0, 1510.000031, 111256.));
+
+ let image_size = size(1510., 111256.);
+
+ let clip_id = builder.define_clip(rect(40., 41., 200., 201.), vec![], None);
+
+ builder.push_clip_id(clip_id);
+ // setup some malicious image size parameters
+ builder.push_image(
+ &info,
+ image_size * 2.,
+ image_size,
+ ImageRendering::Auto,
+ AlphaType::PremultipliedAlpha,
+ blob_img,
+ );
+ builder.pop_clip_id();
+
+ let mut epoch = Epoch(0);
+
+ self.submit_dl(&mut epoch, layout_size, builder, Some(resources));
+
+ let called = Arc::new(AtomicIsize::new(0));
+ let called_inner = Arc::clone(&called);
+
+ self.wrench.callbacks.lock().unwrap().request = Box::new(move |_| {
+ called_inner.fetch_add(1, Ordering::SeqCst);
+ });
+
+ let pixels = self.render_and_get_pixels(window_rect);
+
+ // make sure we didn't request too many blobs
+ assert_eq!(called.load(Ordering::SeqCst), 16);
+
+ // make sure things are in the right spot
+ assert!(
+ pixels[(148 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4] == 255 &&
+ pixels[(148 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 1] == 255 &&
+ pixels[(148 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 2] == 255 &&
+ pixels[(148 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 3] == 255
+ );
+ assert!(
+ pixels[(132 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4] == 50 &&
+ pixels[(132 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 1] == 50 &&
+ pixels[(132 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 2] == 150 &&
+ pixels[(132 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 3] == 255
+ );
+
+ // Leaving a tiled blob image in the resource cache
+ // confuses the `test_capture`. TODO: remove this
+ resources = ResourceUpdates::new();
+ resources.delete_image(blob_img);
+ self.wrench.api.update_resources(resources);
+ }
+
fn test_retained_blob_images_test(&mut self) {
println!("\tretained blob images test...");
let blob_img;
let window_size = self.window.get_inner_size();
let test_size = DeviceUintSize::new(400, 400);
let window_rect = DeviceUintRect::new(
--- a/gfx/wrench/src/ron_frame_writer.rs
+++ b/gfx/wrench/src/ron_frame_writer.rs
@@ -95,28 +95,28 @@ impl RonFrameWriter {
ImageData::Raw(ref v) => (**v).clone(),
ImageData::External(_) | ImageData::Blob(_) => {
return;
}
};
self.images.insert(
img.key,
CachedImage {
- width: img.descriptor.width,
- height: img.descriptor.height,
+ width: img.descriptor.size.width,
+ height: img.descriptor.size.height,
format: img.descriptor.format,
bytes: Some(bytes),
path: None,
},
);
}
ResourceUpdate::UpdateImage(ref img) => {
if let Some(ref mut data) = self.images.get_mut(&img.key) {
- assert_eq!(data.width, img.descriptor.width);
- assert_eq!(data.height, img.descriptor.height);
+ assert_eq!(data.width, img.descriptor.size.width);
+ assert_eq!(data.height, img.descriptor.size.height);
assert_eq!(data.format, img.descriptor.format);
if let ImageData::Raw(ref bytes) = img.data {
data.path = None;
data.bytes = Some((**bytes).clone());
} else {
// Other existing image types only make sense within the gecko integration.
println!(
--- a/gfx/wrench/src/wrench.rs
+++ b/gfx/wrench/src/wrench.rs
@@ -139,17 +139,17 @@ impl WrenchThing for CapturedDocument {
}
}
0
}
}
pub struct Wrench {
window_size: DeviceUintSize,
- device_pixel_ratio: f32,
+ pub device_pixel_ratio: f32,
page_zoom_factor: ZoomFactor,
pub renderer: webrender::Renderer,
pub api: RenderApi,
pub document_id: DocumentId,
pub root_pipeline_id: PipelineId,
window_title_to_set: Option<String>,
--- a/gfx/wrench/src/yaml_frame_reader.rs
+++ b/gfx/wrench/src/yaml_frame_reader.rs
@@ -511,17 +511,17 @@ impl YamlFrameReader {
};
let tiling = tiling.map(|tile_size| tile_size as u16);
let image_key = wrench.api.generate_image_key();
let mut resources = ResourceUpdates::new();
resources.add_image(image_key, descriptor, image_data, tiling);
wrench.api.update_resources(resources);
let val = (
image_key,
- LayoutSize::new(descriptor.width as f32, descriptor.height as f32),
+ LayoutSize::new(descriptor.size.width as f32, descriptor.size.height as f32),
);
self.image_map.insert(key, val);
val
}
fn get_or_create_font(&mut self, desc: FontDescriptor, wrench: &mut Wrench) -> FontKey {
let list_resources = self.list_resources;
*self.fonts
@@ -1238,17 +1238,18 @@ impl YamlFrameReader {
fn handle_iframe(
&mut self,
dl: &mut DisplayListBuilder,
item: &Yaml,
info: &mut LayoutPrimitiveInfo,
) {
info.rect = item["bounds"].as_rect().expect("iframe must have bounds");
let pipeline_id = item["id"].as_pipeline_id().unwrap();
- dl.push_iframe(&info, pipeline_id);
+ let ignore = item["ignore_missing_pipeline"].as_bool().unwrap_or(true);
+ dl.push_iframe(&info, pipeline_id, ignore);
}
pub fn get_complex_clip_for_item(&mut self, yaml: &Yaml) -> Option<ComplexClipRegion> {
let complex_clip = &yaml["complex-clip"];
if complex_clip.is_badvalue() {
return None;
}
Some(self.to_complex_clip_region(complex_clip))
--- a/gfx/wrench/src/yaml_frame_writer.rs
+++ b/gfx/wrench/src/yaml_frame_writer.rs
@@ -509,41 +509,41 @@ impl YamlFrameWriter {
ResourceUpdate::AddImage(ref img) => {
if let Some(ref data) = self.images.get(&img.key) {
if data.path.is_some() {
return;
}
}
let stride = img.descriptor.stride.unwrap_or(
- img.descriptor.width * img.descriptor.format.bytes_per_pixel(),
+ img.descriptor.size.width * img.descriptor.format.bytes_per_pixel(),
);
let bytes = match img.data {
ImageData::Raw(ref v) => (**v).clone(),
ImageData::External(_) | ImageData::Blob(_) => {
return;
}
};
self.images.insert(
img.key,
CachedImage {
- width: img.descriptor.width,
- height: img.descriptor.height,
+ width: img.descriptor.size.width,
+ height: img.descriptor.size.height,
stride,
format: img.descriptor.format,
bytes: Some(bytes),
tiling: img.tiling,
path: None,
},
);
}
ResourceUpdate::UpdateImage(ref img) => {
if let Some(ref mut data) = self.images.get_mut(&img.key) {
- assert_eq!(data.width, img.descriptor.width);
- assert_eq!(data.height, img.descriptor.height);
+ assert_eq!(data.width, img.descriptor.size.width);
+ assert_eq!(data.height, img.descriptor.size.height);
assert_eq!(data.format, img.descriptor.format);
if let ImageData::Raw(ref bytes) = img.data {
data.path = None;
data.bytes = Some((**bytes).clone());
} else {
// Other existing image types only make sense within the gecko integration.
println!(
@@ -1008,16 +1008,17 @@ impl YamlFrameWriter {
&item.gradient,
base.gradient_stops(),
display_list
);
}
Iframe(item) => {
str_node(&mut v, "type", "iframe");
u32_vec_node(&mut v, "id", &[item.pipeline_id.0, item.pipeline_id.1]);
+ bool_node(&mut v, "ignore_missing_pipeline", item.ignore_missing_pipeline);
}
PushStackingContext(item) => {
str_node(&mut v, "type", "stacking-context");
let filters = display_list.get(base.filters());
write_stacking_context(
&mut v,
&item.stacking_context,
&scene.properties,