Compare commits

9 Commits

SHA1 Message Date
0a148369aa Merge branch 'main' of https://codeberg.org/river/river-classic 2026-01-08 17:06:11 -08:00
5885fb79db river: support wl_shm version 2 2026-01-08 19:14:22 +00:00
    This was a typo
21142a8f7c build: bump version to 0.3.15-dev 2026-01-07 04:39:16 +00:00
1787792560 build: bump version to 0.3.14 2026-01-07 04:37:34 +00:00
a46c0df7ed Server: fix wlroots assertion failure on GPU reset 2026-01-06 21:00:44 +00:00
96b54bde86 Merge branch 'main' of git.zander.im:Zander671/river 2025-12-30 16:08:45 -08:00
f6fd7655e2 Merge branch 'main' of https://codeberg.org/river/river-classic 2025-12-30 16:07:48 -08:00
df131f9a9d LayerSurface: don't configure in response to unmap 2025-12-20 18:40:30 +00:00
    We should wait until the next "initial commit" before sending a configure
    after a layer surface unmaps itself.
d38d4105c4 docs: update wiki link 2025-12-18 23:37:34 +00:00
4 changed files with 35 additions and 9 deletions

View File

@@ -8,7 +8,7 @@ that are happy with how river 0.3 works and do not wish to deal with the majorly
breaking changes planned for the river 0.4.0 release.
Join us at [#river](https://web.libera.chat/?channels=#river) on irc.libera.chat —
-Read our man pages, [wiki](https://codeberg.org/river/wiki), and
+Read our man pages, [wiki](https://codeberg.org/river/wiki-classic), and
[Code of Conduct](CODE_OF_CONDUCT.md)
The main repository is on [codeberg](https://codeberg.org/river/river-classic),

View File

@@ -5,7 +5,7 @@
// When a release is tagged, the "-dev" suffix should be removed for the
// commit that gets tagged. Directly after the tagged commit, the version
// should be bumped and the "-dev" suffix added.
.version = "0.3.14-dev",
.version = "0.3.15-dev",
.paths = .{""},
.dependencies = .{
.pixman = .{

View File

@@ -366,7 +366,11 @@ fn sendLayerConfigures(
if (@as(?*SceneNodeData, @ptrCast(@alignCast(node.data)))) |node_data| {
const layer_surface = node_data.data.layer_surface;
if (!layer_surface.wlr_layer_surface.initialized) continue;
+if (!layer_surface.wlr_layer_surface.surface.mapped and
+    !layer_surface.wlr_layer_surface.initial_commit)
+{
+    continue;
+}
const exclusive = layer_surface.wlr_layer_surface.current.exclusive_zone > 0;
if (exclusive != (mode == .exclusive)) {
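
The guard added above encodes the rule from the commit message: once a layer surface has unmapped itself, wait for the client's next initial commit before configuring it again. As a standalone illustration (not code from river or wlroots), the same check can be written as a C predicate against the wlroots struct fields that the Zig bindings above wrap; should_send_configure is a hypothetical helper name.

#define WLR_USE_UNSTABLE
#include <stdbool.h>
#include <wlr/types/wlr_layer_shell_v1.h>

/* Hypothetical helper: should the compositor send a configure to this
 * layer surface during the current arrange pass? */
static bool should_send_configure(const struct wlr_layer_surface_v1 *layer_surface)
{
    /* No initial commit has been made for this role yet: nothing to configure. */
    if (!layer_surface->initialized)
        return false;

    /* The surface unmapped itself and is not currently at an initial commit:
     * wait for the client's next initial commit instead of configuring an
     * unmapped surface. */
    if (!layer_surface->surface->mapped && !layer_surface->initial_commit)
        return false;

    return true;
}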

View File

@@ -57,6 +57,7 @@ session: ?*wlr.Session,
renderer: *wlr.Renderer,
allocator: *wlr.Allocator,
+gpu_reset_recover: ?*wl.EventSource = null,
security_context_manager: *wlr.SecurityContextManagerV1,
@@ -143,7 +144,7 @@ pub fn init(server: *Server, runtime_xwayland: bool) !void {
.security_context_manager = try wlr.SecurityContextManagerV1.create(wl_server),
-.shm = try wlr.Shm.createWithRenderer(wl_server, 1, renderer),
+.shm = try wlr.Shm.createWithRenderer(wl_server, 2, renderer),
.single_pixel_buffer_manager = try wlr.SinglePixelBufferManagerV1.create(wl_server),
.viewporter = try wlr.Viewporter.create(wl_server),
@@ -373,19 +374,40 @@ fn terminate(_: c_int, wl_server: *wl.Server) c_int {
fn handleRendererLost(listener: *wl.Listener(void)) void {
const server: *Server = @fieldParentPtr("renderer_lost", listener);
+if (server.gpu_reset_recover != null) {
+log.info("ignoring GPU reset event, recovery already scheduled", .{});
+return;
+}
+log.info("received GPU reset event, scheduling recovery", .{});
+// There's a design wart in this wlroots API: calling wlr_renderer_destroy()
+// from inside this listener for the renderer lost event causes the assertion
+// that all listener lists are empty in wlr_renderer_destroy() to fail. This
+// happens even if river has already called server.renderer_lost.link.remove()
+// since wlroots uses wl_signal_emit_mutable(), which is implemented by adding
+// temporary links to the list during iteration.
+// Using an idle callback is the most straightforward way to work around this
+// design wart.
+const event_loop = server.wl_server.getEventLoop();
+server.gpu_reset_recover = event_loop.addIdle(*Server, gpuResetRecoverIdle, server) catch |err| switch (err) {
+error.OutOfMemory => {
+log.err("out of memory", .{});
+return;
+},
+};
+}
-log.info("recovering from GPU reset", .{});
+fn gpuResetRecoverIdle(server: *Server) void {
+server.gpu_reset_recover = null;
// There's not much that can be done if creating a new renderer or allocator fails.
// With luck there might be another GPU reset after which we try again and succeed.
-server.recoverFromGpuReset() catch |err| switch (err) {
+server.gpuResetRecover() catch |err| switch (err) {
error.RendererCreateFailed => log.err("failed to create new renderer after GPU reset", .{}),
error.AllocatorCreateFailed => log.err("failed to create new allocator after GPU reset", .{}),
};
}
-fn recoverFromGpuReset(server: *Server) !void {
+fn gpuResetRecover(server: *Server) !void {
+log.info("recovering from GPU reset", .{});
const new_renderer = try wlr.Renderer.autocreate(server.backend);
errdefer new_renderer.destroy();
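
The comment in handleRendererLost above is the heart of this change: wl_signal_emit_mutable() keeps temporary links in the signal's listener list for the whole emission, so the lost renderer's listener lists are never empty while the handler runs, and the assertion in wlr_renderer_destroy() would fail. A simplified paraphrase of that libwayland function, reconstructed from memory rather than copied verbatim from wayland-server.c, shows the mechanism:

#include <wayland-server-core.h>

/* emit_mutable_sketch() approximates wl_signal_emit_mutable(): two temporary
 * marker links (cursor and end) are inserted into the listener list and stay
 * there for the whole emission, so the list is non-empty even if every real
 * listener removes itself from inside its notify callback. */
void emit_mutable_sketch(struct wl_signal *signal, void *data)
{
    struct wl_listener cursor;
    struct wl_listener end;

    wl_list_insert(&signal->listener_list, &cursor.link);
    cursor.notify = NULL;
    wl_list_insert(signal->listener_list.prev, &end.link);
    end.notify = NULL;

    while (cursor.link.next != &end.link) {
        struct wl_list *pos = cursor.link.next;
        struct wl_listener *l = wl_container_of(pos, l, link);

        /* Advance the cursor past the listener we are about to call so that
         * the listener may safely remove itself from the list. */
        wl_list_remove(&cursor.link);
        wl_list_insert(pos, &cursor.link);

        l->notify(l, data);
    }

    wl_list_remove(&cursor.link);
    wl_list_remove(&end.link);
}

Deferring the destroy-and-recreate work to an idle callback, as the diff does, runs it only after emission has finished and the temporary marker links have been removed.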