session-lock: fix potential race
Currently the session lock client has no 100% safe way to know when it is safe to suspend after requesting that the session be locked. For a suspend to be safe, the compositor must have either blanked or rendered a lock surface on all outputs before suspending. This is because the framebuffer contents at the time of suspend appear to be saved and displayed again on resume, at least on my Linux system. If a new "locked" frame has not been rendered on every output before suspend, one or more "unlocked" frames will likely be displayed briefly on resume, before the lock surfaces are rendered or the screen is blanked.

To fix this, wait until a lock surface has been rendered on all outputs, or, if that times out, until all outputs have been blanked, before sending the locked event to the client. Resolving this race on the compositor side without protocol changes is the most effective way to avoid this potential information leak, regardless of which session lock client is used.
parent 5d4c2f2fbd
commit 8f8d94aa45
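To make the ordering concrete, here is a minimal standalone sketch of the state machine this commit introduces (plain Zig with simplified names; an illustration only, not river's actual LockManager code — the real transitions are in the LockManager.zig hunks below, and the 200 ms figure comes from that diff):

const std = @import("std");

/// Simplified model of the lock sequencing introduced by this commit.
const LockState = enum {
    /// No lock request has been made.
    unlocked,
    /// Lock requested; waiting for every output to render a lock surface.
    waiting_for_lock_surfaces,
    /// Waiting for lock surfaces timed out; now only blanked outputs are required.
    waiting_for_blank,
    /// Every output is blanked or shows a lock surface; the locked event has been sent.
    locked,
};

/// A lock request moves the session out of the unlocked state, but the locked
/// event is not sent yet.
fn handleLockRequest(state: *LockState) void {
    if (state.* == .unlocked) state.* = .waiting_for_lock_surfaces;
}

/// If lock surfaces do not appear in time (200 ms in the diff below), fall back
/// to requiring only that all outputs be blanked.
fn handleTimeout(state: *LockState) void {
    if (state.* == .waiting_for_lock_surfaces) state.* = .waiting_for_blank;
}

test "locked is only reached through a waiting state" {
    var state: LockState = .unlocked;
    handleLockRequest(&state);
    try std.testing.expect(state == .waiting_for_lock_surfaces);
    handleTimeout(&state);
    try std.testing.expect(state == .waiting_for_blank);
}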
@@ -352,7 +352,7 @@ fn updateKeyboardFocus(self: Self, result: SurfaceAtResult) void {
             }
         },
         .lock_surface => |lock_surface| {
-            assert(server.lock_manager.locked);
+            assert(server.lock_manager.state != .unlocked);
             self.seat.setFocusRaw(.{ .lock_surface = lock_surface });
         },
         .xwayland_override_redirect => |override_redirect| {
@@ -664,7 +664,7 @@ fn surfaceAtCoords(lx: f64, ly: f64) ?SurfaceAtResult {
         var oy = ly;
         server.root.output_layout.outputCoords(wlr_output, &ox, &oy);

-        if (server.lock_manager.locked) {
+        if (server.lock_manager.state != .unlocked) {
             if (output.lock_surface) |lock_surface| {
                 var sx: f64 = undefined;
                 var sy: f64 = undefined;
@@ -1083,7 +1083,7 @@ fn shouldPassthrough(self: Self) bool {
             return false;
         },
         .resize, .move => {
-            assert(!server.lock_manager.locked);
+            assert(server.lock_manager.state == .unlocked);
             const target = if (self.mode == .resize) self.mode.resize.view else self.mode.move.view;
             // The target view is no longer visible, is part of the layout, or is fullscreen.
             return target.current.tags & target.output.current.tags == 0 or
@@ -1098,7 +1098,7 @@ fn passthrough(self: *Self, time: u32) void {
     assert(self.mode == .passthrough);

     if (self.surfaceAt()) |result| {
-        assert((result.parent == .lock_surface) == server.lock_manager.locked);
+        assert((result.parent == .lock_surface) == (server.lock_manager.state != .unlocked));
         self.seat.wlr_seat.pointerNotifyEnter(result.surface, result.sx, result.sy);
         self.seat.wlr_seat.pointerNotifyMotion(time, result.sx, result.sy);
     } else {
@@ -28,9 +28,28 @@ const LockSurface = @import("LockSurface.zig");

 const log = std.log.scoped(.session_lock);

-locked: bool = false,
+state: enum {
+    /// No lock request has been made and the session is unlocked.
+    unlocked,
+    /// A lock request has been made and river is waiting for all outputs to have
+    /// rendered a lock surface before sending the locked event.
+    waiting_for_lock_surfaces,
+    /// A lock request has been made but waiting for a lock surface to be rendered
+    /// on all outputs timed out. Now river is waiting only for all outputs to at
+    /// least be blanked before sending the locked event.
+    waiting_for_blank,
+    /// All outputs are either blanked or have a lock surface rendered and the
+    /// locked event has been sent.
+    locked,
+} = .unlocked,
 lock: ?*wlr.SessionLockV1 = null,
+
+/// Limit on how long the locked event will be delayed to wait for
+/// lock surfaces to be created and rendered. If this times out, then
+/// the locked event will be sent immediately after all outputs have
+/// been blanked.
+lock_surfaces_timer: *wl.EventSource,

 new_lock: wl.Listener(*wlr.SessionLockV1) = wl.Listener(*wlr.SessionLockV1).init(handleLock),
 unlock: wl.Listener(void) = wl.Listener(void).init(handleUnlock),
 destroy: wl.Listener(void) = wl.Listener(void).init(handleDestroy),
@@ -38,7 +57,14 @@ new_surface: wl.Listener(*wlr.SessionLockSurfaceV1) =
     wl.Listener(*wlr.SessionLockSurfaceV1).init(handleSurface),

 pub fn init(manager: *LockManager) !void {
-    manager.* = .{};
+    const event_loop = server.wl_server.getEventLoop();
+    const timer = try event_loop.addTimer(*LockManager, handleLockSurfacesTimeout, manager);
+    errdefer timer.remove();
+
+    manager.* = .{
+        .lock_surfaces_timer = timer,
+    };
+
     const wlr_manager = try wlr.SessionLockManagerV1.create(server.wl_server);
     wlr_manager.events.new_lock.add(&manager.new_lock);
 }
@@ -47,6 +73,8 @@ pub fn deinit(manager: *LockManager) void {
     // deinit() should only be called after wl.Server.destroyClients()
     assert(manager.lock == null);

+    manager.lock_surfaces_timer.remove();
+
     manager.new_lock.link.remove();
 }

@@ -60,24 +88,32 @@ fn handleLock(listener: *wl.Listener(*wlr.SessionLockV1), lock: *wlr.SessionLock
     }

     manager.lock = lock;
-    lock.sendLocked();

-    if (!manager.locked) {
-        manager.locked = true;
+    if (manager.state == .unlocked) {
+        manager.state = .waiting_for_lock_surfaces;

-        var it = server.input_manager.seats.first;
-        while (it) |node| : (it = node.next) {
-            const seat = &node.data;
-            seat.setFocusRaw(.none);
-            seat.cursor.updateState();
+        manager.lock_surfaces_timer.timerUpdate(200) catch {
+            log.err("error setting lock surfaces timer, imperfect frames may be shown", .{});
+            manager.state = .waiting_for_blank;
+        };

-            // Enter locked mode
-            seat.prev_mode_id = seat.mode_id;
-            seat.enterMode(1);
+        {
+            var it = server.input_manager.seats.first;
+            while (it) |node| : (it = node.next) {
+                const seat = &node.data;
+                seat.setFocusRaw(.none);
+                seat.cursor.updateState();
+
+                // Enter locked mode
+                seat.prev_mode_id = seat.mode_id;
+                seat.enterMode(1);
+            }
         }
+    } else {
+        if (manager.state == .locked) {
+            lock.sendLocked();
+        }

-        log.info("session locked", .{});
-    } else {
         log.info("new session lock client given control of already locked session", .{});
     }

@@ -86,11 +122,71 @@ fn handleLock(listener: *wl.Listener(*wlr.SessionLockV1), lock: *wlr.SessionLock
     lock.events.destroy.add(&manager.destroy);
 }

+fn handleLockSurfacesTimeout(manager: *LockManager) callconv(.C) c_int {
+    log.err("waiting for lock surfaces timed out, imperfect frames may be shown", .{});
+
+    assert(manager.state == .waiting_for_lock_surfaces);
+    manager.state = .waiting_for_blank;
+
+    {
+        var it = server.root.outputs.first;
+        while (it) |node| : (it = node.next) {
+            const output = &node.data;
+            if (output.lock_render_state == .unlocked) {
+                output.damage.?.addWhole();
+            }
+        }
+    }
+
+    return 0;
+}
+
+pub fn maybeLock(manager: *LockManager) void {
+    var all_outputs_blanked = true;
+    var all_outputs_rendered_lock_surface = true;
+    {
+        var it = server.root.outputs.first;
+        while (it) |node| : (it = node.next) {
+            const output = &node.data;
+            switch (output.lock_render_state) {
+                .unlocked => {
+                    all_outputs_blanked = false;
+                    all_outputs_rendered_lock_surface = false;
+                },
+                .blanked => {
+                    all_outputs_rendered_lock_surface = false;
+                },
+                .lock_surface => {},
+            }
+        }
+    }
+
+    switch (manager.state) {
+        .waiting_for_lock_surfaces => if (all_outputs_rendered_lock_surface) {
+            log.info("session locked", .{});
+            manager.lock.?.sendLocked();
+            manager.state = .locked;
+            manager.lock_surfaces_timer.timerUpdate(0) catch {};
+        },
+        .waiting_for_blank => if (all_outputs_blanked) {
+            log.info("session locked", .{});
+            manager.lock.?.sendLocked();
+            manager.state = .locked;
+        },
+        .unlocked, .locked => unreachable,
+    }
+}
+
 fn handleUnlock(listener: *wl.Listener(void)) void {
     const manager = @fieldParentPtr(LockManager, "unlock", listener);

-    assert(manager.locked);
-    manager.locked = false;
+    // TODO(wlroots): this will soon be handled by the wlroots session lock implementation
+    if (manager.state != .locked) {
+        manager.lock.?.resource.postError(.invalid_unlock, "the locked event was never sent");
+        return;
+    }
+
+    manager.state = .unlocked;

     log.info("session unlocked", .{});

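A rough standalone model of the maybeLock() decision above, for readers skimming the diff (hypothetical helper and test in plain Zig; river's real code walks the output list rather than taking a slice):

const std = @import("std");

const LockRenderState = enum { unlocked, blanked, lock_surface };

/// Returns true if the locked event may be sent: every output must at least be
/// blanked, and while still waiting for lock surfaces, every output must
/// actually have rendered one.
fn readyToSendLocked(outputs: []const LockRenderState, waiting_for_lock_surfaces: bool) bool {
    var all_blanked = true;
    var all_lock_surface = true;
    for (outputs) |s| {
        switch (s) {
            .unlocked => {
                all_blanked = false;
                all_lock_surface = false;
            },
            .blanked => {
                all_lock_surface = false;
            },
            .lock_surface => {},
        }
    }
    return if (waiting_for_lock_surfaces) all_lock_surface else all_blanked;
}

test "locked is withheld while any output still shows an unlocked frame" {
    const mixed = [_]LockRenderState{ .lock_surface, .unlocked };
    try std.testing.expect(!readyToSendLocked(&mixed, true));
    try std.testing.expect(!readyToSendLocked(&mixed, false));

    const blanked = [_]LockRenderState{ .blanked, .lock_surface };
    try std.testing.expect(!readyToSendLocked(&blanked, true));
    try std.testing.expect(readyToSendLocked(&blanked, false));
}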
@@ -120,6 +216,10 @@ fn handleDestroy(listener: *wl.Listener(void)) void {
     manager.destroy.link.remove();

     manager.lock = null;
+    if (manager.state == .waiting_for_lock_surfaces) {
+        manager.state = .waiting_for_blank;
+        manager.lock_surfaces_timer.timerUpdate(0) catch {};
+    }
 }

 fn handleSurface(
@@ -130,7 +230,7 @@ fn handleSurface(

     log.debug("new ext_session_lock_surface_v1 created", .{});

-    assert(manager.locked);
+    assert(manager.state != .unlocked);
     assert(manager.lock != null);

     LockSurface.create(wlr_lock_surface, manager.lock.?);
@@ -69,6 +69,11 @@ usable_box: wlr.Box,
 views: ViewStack(View) = .{},

 lock_surface: ?*LockSurface = null,
+lock_render_state: enum {
+    unlocked,
+    blanked,
+    lock_surface,
+} = .unlocked,

 /// The double-buffered state of the output.
 current: State = State{ .tags = 1 << 0 },
@@ -151,7 +151,7 @@ pub fn focus(self: *Self, _target: ?*View) void {
     var target = _target;

     // Views may not recieve focus while locked.
-    if (server.lock_manager.locked) return;
+    if (server.lock_manager.state != .unlocked) return;

     // While a layer surface is focused, views may not recieve focus
     if (self.focused == .layer) return;
@@ -242,17 +242,17 @@ pub fn setFocusRaw(self: *Self, new_focus: FocusTarget) void {
     // Set the new focus
     switch (new_focus) {
         .view => |target_view| {
-            assert(!server.lock_manager.locked);
+            assert(server.lock_manager.state == .unlocked);
             assert(self.focused_output == target_view.output);
             if (target_view.pending.focus == 0) target_view.setActivated(true);
             target_view.pending.focus += 1;
             target_view.pending.urgent = false;
         },
         .layer => |target_layer| {
-            assert(!server.lock_manager.locked);
+            assert(server.lock_manager.state == .unlocked);
             assert(self.focused_output == target_layer.output);
         },
-        .lock_surface => assert(server.lock_manager.locked),
+        .lock_surface => assert(server.lock_manager.state != .unlocked),
         .xwayland_override_redirect, .none => {},
     }
     self.focused = new_focus;
@@ -63,7 +63,13 @@ pub fn renderOutput(output: *Output) void {

     server.renderer.begin(@intCast(u32, output.wlr_output.width), @intCast(u32, output.wlr_output.height));

-    if (server.lock_manager.locked) {
+    // In order to avoid flashing a blank black screen as the session is locked
+    // continue to render the unlocked session until either a lock surface is
+    // created or waiting for lock surfaces times out.
+    if (server.lock_manager.state == .locked or
+        (server.lock_manager.state == .waiting_for_lock_surfaces and output.lock_surface != null) or
+        server.lock_manager.state == .waiting_for_blank)
+    {
         server.renderer.clear(&[_]f32{ 0, 0, 0, 1 }); // solid black

         // TODO: this isn't frame-perfect if the output mode is changed. We
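The new condition above can be read as a small predicate (hypothetical helper in plain Zig, not part of the commit): the locked frame (black or lock surface) is drawn in every lock-related state except while this output is still waiting for its lock surface, in which case the unlocked session keeps being rendered to avoid a black flash.

const std = @import("std");

const LockState = enum { unlocked, waiting_for_lock_surfaces, waiting_for_blank, locked };

/// Mirrors the branch in renderOutput(): draw the locked frame unless the
/// session is unlocked, or this output's lock surface has not appeared yet
/// while we are still waiting for lock surfaces.
fn renderLockedFrame(state: LockState, has_lock_surface: bool) bool {
    return switch (state) {
        .unlocked => false,
        .waiting_for_lock_surfaces => has_lock_surface,
        .waiting_for_blank, .locked => true,
    };
}

test "unlocked content is still drawn while waiting for a missing lock surface" {
    try std.testing.expect(!renderLockedFrame(.waiting_for_lock_surfaces, false));
    try std.testing.expect(renderLockedFrame(.waiting_for_blank, false));
    try std.testing.expect(renderLockedFrame(.locked, true));
}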
@@ -87,10 +93,19 @@ pub fn renderOutput(output: *Output) void {

         output.wlr_output.renderSoftwareCursors(null);
         server.renderer.end();
-        output.wlr_output.commit() catch
+        output.wlr_output.commit() catch {
             log.err("output commit failed for {s}", .{output.wlr_output.name});
+            return;
+        };
+
+        output.lock_render_state = if (output.lock_surface != null) .lock_surface else .blanked;
+        if (server.lock_manager.state != .locked) {
+            server.lock_manager.maybeLock();
+        }
+
         return;
     }
+    output.lock_render_state = .unlocked;

     // Find the first visible fullscreen view in the stack if there is one
     var it = ViewStack(View).iter(output.views.first, .forward, output.current.tags, renderFilter);