DRM: multi-plane support via libliftoff.

Import libliftoff as subproject and plumb our planes through it.

For now, wait for a flip to complete immediately after queuing it.

Also immediately wait for cursor image uploads, as we don't have to
do Vulkan rendering anymore after queuing an upload.

Cursor shows up with format hack, but is badly corrupt. We might need
to use a DRM dumb_fb rather than going through the same Vulkan image.

Alpha bits of surfaces are ignored again; we probably need to make Steam
select an ARGB visual for overlay windows.

Setting up the "alpha" plane property somehow makes overlays not
show up at all despite liftoff happily letting us go with scanout.
This commit is contained in:
Pierre-Loup A. Griffais 2020-01-05 20:22:27 -08:00
parent a8d96fb227
commit 6b3e2ddf68
8 changed files with 205 additions and 133 deletions

3
.gitmodules vendored
View file

@ -1,3 +1,6 @@
[submodule "subprojects/wlroots"]
path = subprojects/wlroots
url = https://github.com/Plagman/wlroots.git
[submodule "subprojects/libliftoff"]
path = subprojects/libliftoff
url = https://github.com/emersion/libliftoff.git

View file

@ -40,6 +40,9 @@ spirv_shader = custom_target('shader_target',
install : false,
)
liftoff_proj = subproject('libliftoff')
libftoff_dep = liftoff_proj.get_variable('liftoff')
executable(
'steamcompmgr',
'src/steamcompmgr.c',
@ -52,7 +55,7 @@ executable(
dep_x11, dep_xdamage, dep_xcomposite, dep_xrender, dep_xext,
dep_xxf86vm, pixman_dep, drm_dep, wayland_server, wayland_protos,
libinput, xkbcommon, math, thread_dep, sdl_dep, wlroots_static_dep,
vulkan_dep
vulkan_dep, libftoff_dep
],
install: true,
)

View file

@ -187,17 +187,17 @@ static int get_plane_id(struct drm_t *drm)
static void page_flip_handler(int fd, unsigned int frame,
unsigned int sec, unsigned int usec, void *data)
{
uint32_t fbid = (uint32_t)(uint64_t)data;
static uint32_t previous_fbid = 0;
// TODO: get the fbids_in_req instance from data if we ever have more than one in flight
if ( s_drm_log != 0 )
{
printf("page_flip_handler %u\n", fbid);
printf("page_flip_handler %p\n", data);
}
if ( previous_fbid != 0 )
for ( uint32_t i = 0; i < g_DRM.fbids_on_screen.size(); i++ )
{
uint32_t previous_fbid = g_DRM.fbids_on_screen[ i ];
assert( previous_fbid != 0 );
assert( g_DRM.map_fbid_inflightflips[ previous_fbid ].second > 0 );
g_DRM.map_fbid_inflightflips[ previous_fbid ].second--;
@ -223,8 +223,17 @@ static void page_flip_handler(int fd, unsigned int frame,
}
}
}
previous_fbid = fbid;
g_DRM.fbids_on_screen.clear();
for ( uint32_t i = 0; i < g_DRM.fbids_in_req.size(); i++ )
{
g_DRM.fbids_on_screen.push_back( g_DRM.fbids_in_req[ i ] );
}
g_DRM.fbids_in_req.clear();
g_DRM.flip_lock.unlock();
}
void flip_handler_thread_run(void)
@ -372,6 +381,7 @@ int init_drm(struct drm_t *drm, const char *device, const char *mode_str, unsign
drm->connector_id = connector->connector_id;
drmSetClientCap(drm->fd, DRM_CLIENT_CAP_ATOMIC, 1);
drmSetClientCap(drm->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
drm->plane_id = get_plane_id( &g_DRM );
@ -436,6 +446,17 @@ int init_drm(struct drm_t *drm, const char *device, const char *mode_str, unsign
g_nOutputHeight = drm->mode->hdisplay;
}
drm->lo_device = liftoff_device_create( drm->fd );
drm->lo_output = liftoff_output_create( drm->lo_device, drm->crtc_id );
assert( drm->lo_device && drm->lo_output );
for ( int i = 0; i < k_nMaxLayers; i++ )
{
drm->lo_layers[ i ] = liftoff_layer_create( drm->lo_output );
assert( drm->lo_layers[ i ] );
}
return 0;
}
@ -485,117 +506,27 @@ static int add_crtc_property(struct drm_t *drm, drmModeAtomicReq *req,
return drmModeAtomicAddProperty(req, obj_id, prop_id, value);
}
/*
 * Resolve a DRM plane property by name and stage it into the atomic request.
 *
 * Scans the cached property list of drm->plane for a property whose name
 * matches `name`, then adds (obj_id, prop_id, value) to `req`.
 *
 * Returns the result of drmModeAtomicAddProperty() on success (non-negative),
 * or -EINVAL when the plane has no property with that name.
 *
 * NOTE(review): the lookup always uses drm->plane regardless of the obj_id
 * argument — confirm callers only ever pass that same plane's object id.
 */
static int add_plane_property(struct drm_t *drm, drmModeAtomicReq *req,
uint32_t obj_id, const char *name,
uint64_t value)
{
struct plane *obj = drm->plane;
unsigned int i;
int prop_id = -1;
/* Linear scan: property counts per plane are small, so this is fine. */
for (i = 0 ; i < obj->props->count_props ; i++) {
if (strcmp(obj->props_info[i]->name, name) == 0) {
prop_id = obj->props_info[i]->prop_id;
break;
}
}
if (prop_id < 0) {
printf("no plane property: %s\n", name);
return -EINVAL;
}
return drmModeAtomicAddProperty(req, obj_id, prop_id, value);
}
int drm_atomic_commit(struct drm_t *drm, struct Composite_t *pComposite, struct VulkanPipeline_t *pPipeline )
{
drmModeAtomicReq *req;
uint32_t plane_id = drm->plane->plane->plane_id;
uint32_t blob_id;
int ret;
// :/
assert( pComposite->flLayerCount == 1.0f );
assert( drm->req != nullptr );
uint32_t fb_id = pPipeline->layerBindings[ 0 ].fbid;
// if (drm->kms_in_fence_fd != -1) {
// add_plane_property(drm, req, plane_id, "IN_FENCE_FD", drm->kms_in_fence_fd);
// }
req = drmModeAtomicAlloc();
// drm->kms_out_fence_fd = -1;
static bool bFirstSwap = true;
uint32_t flags = DRM_MODE_ATOMIC_NONBLOCK;
if ( bFirstSwap == true )
{
flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
bFirstSwap = false;
}
// We do internal refcounting with these events
flags |= DRM_MODE_PAGE_FLIP_EVENT;
if (flags & DRM_MODE_ATOMIC_ALLOW_MODESET) {
if (add_connector_property(drm, req, drm->connector_id, "CRTC_ID",
drm->crtc_id) < 0)
return -1;
if (drmModeCreatePropertyBlob(drm->fd, drm->mode, sizeof(*drm->mode),
&blob_id) != 0)
return -1;
if (add_crtc_property(drm, req, drm->crtc_id, "MODE_ID", blob_id) < 0)
return -1;
if (add_crtc_property(drm, req, drm->crtc_id, "ACTIVE", 1) < 0)
return -1;
}
if ( g_bRotated )
{
add_plane_property(drm, req, plane_id, "rotation", DRM_MODE_ROTATE_270);
}
add_plane_property(drm, req, plane_id, "FB_ID", fb_id);
add_plane_property(drm, req, plane_id, "CRTC_ID", drm->crtc_id);
add_plane_property(drm, req, plane_id, "SRC_X", 0);
add_plane_property(drm, req, plane_id, "SRC_Y", 0);
add_plane_property(drm, req, plane_id, "SRC_W", pPipeline->layerBindings[ 0 ].surfaceWidth << 16);
add_plane_property(drm, req, plane_id, "SRC_H", pPipeline->layerBindings[ 0 ].surfaceHeight << 16);
if ( g_bRotated )
{
add_plane_property(drm, req, plane_id, "CRTC_X", pComposite->layers[ 0 ].flOffsetY * -1);
add_plane_property(drm, req, plane_id, "CRTC_Y", pComposite->layers[ 0 ].flOffsetX * -1);
add_plane_property(drm, req, plane_id, "CRTC_H", pPipeline->layerBindings[ 0 ].surfaceWidth / pComposite->layers[ 0 ].flScaleX);
add_plane_property(drm, req, plane_id, "CRTC_W", pPipeline->layerBindings[ 0 ].surfaceHeight / pComposite->layers[ 0 ].flScaleY);
}
else
{
add_plane_property(drm, req, plane_id, "CRTC_X", pComposite->layers[ 0 ].flOffsetX * -1);
add_plane_property(drm, req, plane_id, "CRTC_Y", pComposite->layers[ 0 ].flOffsetY * -1);
add_plane_property(drm, req, plane_id, "CRTC_W", pPipeline->layerBindings[ 0 ].surfaceWidth / pComposite->layers[ 0 ].flScaleX);
add_plane_property(drm, req, plane_id, "CRTC_H", pPipeline->layerBindings[ 0 ].surfaceHeight / pComposite->layers[ 0 ].flScaleY);
}
if (drm->kms_in_fence_fd != -1) {
add_plane_property(drm, req, plane_id, "IN_FENCE_FD", drm->kms_in_fence_fd);
}
drm->kms_out_fence_fd = -1;
add_crtc_property(drm, req, drm->crtc_id, "OUT_FENCE_PTR",
(uint64_t)(unsigned long)&drm->kms_out_fence_fd);
// add_crtc_property(drm, req, drm->crtc_id, "OUT_FENCE_PTR",
// (uint64_t)(unsigned long)&drm->kms_out_fence_fd);
if ( s_drm_log != 0 )
{
printf("flipping fbid %u\n", fb_id);
printf("flipping\n");
}
ret = drmModeAtomicCommit(drm->fd, req, flags, (void *)(uint64_t)fb_id);
drm->flip_lock.lock();
ret = drmModeAtomicCommit(drm->fd, drm->req, drm->flags, nullptr );
if (ret)
{
if ( ret != -EBUSY )
@ -609,18 +540,26 @@ int drm_atomic_commit(struct drm_t *drm, struct Composite_t *pComposite, struct
goto out;
}
assert( g_DRM.map_fbid_inflightflips[ fb_id ].first == true );
g_DRM.map_fbid_inflightflips[ fb_id ].second++;
if (drm->kms_in_fence_fd != -1) {
close(drm->kms_in_fence_fd);
drm->kms_in_fence_fd = -1;
for ( uint32_t i = 0; i < drm->fbids_in_req.size(); i++ )
{
assert( g_DRM.map_fbid_inflightflips[ drm->fbids_in_req[ i ] ].first == true );
g_DRM.map_fbid_inflightflips[ drm->fbids_in_req[ i ] ].second++;
}
drm->kms_in_fence_fd = drm->kms_out_fence_fd;
// Wait for flip handler to unlock
drm->flip_lock.lock();
drm->flip_lock.unlock();
// if (drm->kms_in_fence_fd != -1) {
// close(drm->kms_in_fence_fd);
// drm->kms_in_fence_fd = -1;
// }
//
// drm->kms_in_fence_fd = drm->kms_out_fence_fd;
out:
drmModeAtomicFree(req);
drmModeAtomicFree( drm->req );
drm->req = nullptr;
return ret;
}
@ -666,11 +605,106 @@ void drm_free_fbid( struct drm_t *drm, uint32_t fbid )
}
}
bool drm_can_avoid_composite( struct drm_t *drm, struct Composite_t *pComposite )
bool drm_can_avoid_composite( struct drm_t *drm, struct Composite_t *pComposite, struct VulkanPipeline_t *pPipeline )
{
// No multiplane support for now, thoon
if ( pComposite->flLayerCount == 1 )
drm->fbids_in_req.clear();
int nLayerCount = pComposite->flLayerCount;
for ( int i = 0; i < k_nMaxLayers; i++ )
{
if ( i < nLayerCount )
{
if ( g_bRotated )
{
liftoff_layer_set_property( drm->lo_layers[ i ], "rotation", DRM_MODE_ROTATE_270);
}
assert( pPipeline->layerBindings[ i ].fbid != 0 );
liftoff_layer_set_property( drm->lo_layers[ i ], "FB_ID", pPipeline->layerBindings[ i ].fbid);
drm->fbids_in_req.push_back( pPipeline->layerBindings[ i ].fbid );
// liftoff_layer_set_property( drm->lo_layers[ i ], "alpha", pComposite->layers[ i ].flOpacity * 0xffff);
liftoff_layer_set_property( drm->lo_layers[ i ], "SRC_X", 0);
liftoff_layer_set_property( drm->lo_layers[ i ], "SRC_Y", 0);
liftoff_layer_set_property( drm->lo_layers[ i ], "SRC_W", pPipeline->layerBindings[ i ].surfaceWidth << 16);
liftoff_layer_set_property( drm->lo_layers[ i ], "SRC_H", pPipeline->layerBindings[ i ].surfaceHeight << 16);
if ( g_bRotated )
{
liftoff_layer_set_property( drm->lo_layers[ i ], "CRTC_X", pComposite->layers[ i ].flOffsetY * -1);
liftoff_layer_set_property( drm->lo_layers[ i ], "CRTC_Y", pComposite->layers[ i ].flOffsetX * -1);
liftoff_layer_set_property( drm->lo_layers[ i ], "CRTC_H", pPipeline->layerBindings[ i ].surfaceWidth / pComposite->layers[ i ].flScaleX);
liftoff_layer_set_property( drm->lo_layers[ i ], "CRTC_W", pPipeline->layerBindings[ i ].surfaceHeight / pComposite->layers[ i ].flScaleY);
}
else
{
liftoff_layer_set_property( drm->lo_layers[ i ], "CRTC_X", pComposite->layers[ i ].flOffsetX * -1);
liftoff_layer_set_property( drm->lo_layers[ i ], "CRTC_Y", pComposite->layers[ i ].flOffsetY * -1);
liftoff_layer_set_property( drm->lo_layers[ i ], "CRTC_W", pPipeline->layerBindings[ i ].surfaceWidth / pComposite->layers[ i ].flScaleX);
liftoff_layer_set_property( drm->lo_layers[ i ], "CRTC_H", pPipeline->layerBindings[ i ].surfaceHeight / pComposite->layers[ i ].flScaleY);
}
}
else
{
liftoff_layer_set_property( drm->lo_layers[ i ], "FB_ID", 0 );
}
}
assert( drm->req == nullptr );
drm->req = drmModeAtomicAlloc();
static bool bFirstSwap = true;
uint32_t flags = DRM_MODE_ATOMIC_NONBLOCK;
uint32_t blob_id;
if ( bFirstSwap == true )
{
flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
bFirstSwap = false;
}
// We do internal refcounting with these events
flags |= DRM_MODE_PAGE_FLIP_EVENT;
if (flags & DRM_MODE_ATOMIC_ALLOW_MODESET) {
if (add_connector_property(drm, drm->req, drm->connector_id, "CRTC_ID",
drm->crtc_id) < 0)
return -1;
if (drmModeCreatePropertyBlob(drm->fd, drm->mode, sizeof(*drm->mode),
&blob_id) != 0)
return -1;
if (add_crtc_property(drm, drm->req, drm->crtc_id, "MODE_ID", blob_id) < 0)
return -1;
if (add_crtc_property(drm, drm->req, drm->crtc_id, "ACTIVE", 1) < 0)
return -1;
}
drm->flags = flags;
if ( liftoff_output_apply( drm->lo_output, drm->req ) == true )
{
if ( s_drm_log != 0 )
{
fprintf( stderr, "can drm present %i layers\n", nLayerCount );
}
return true;
}
if ( s_drm_log != 0 )
{
fprintf( stderr, "can NOT drm present %i layers\n", nLayerCount );
}
drmModeAtomicFree( drm->req );
drm->req = nullptr;
return false;
}

View file

@ -54,15 +54,29 @@ struct drm_t {
uint32_t plane_id;
drmModeAtomicReq *req;
uint32_t flags;
struct liftoff_device *lo_device;
struct liftoff_output *lo_output;
struct liftoff_layer *lo_layers[ k_nMaxLayers ];
std::vector < uint32_t > fbids_in_req;
std::vector < uint32_t > fbids_on_screen;
std::unordered_map< uint32_t, std::pair< bool, std::atomic< uint32_t > > > map_fbid_inflightflips;
std::mutex free_queue_lock;
std::vector< uint32_t > fbid_free_queue;
std::mutex flip_lock;
};
#endif
#ifndef C_SIDE
extern "C" {
#endif
#include "libliftoff.h"
extern struct drm_t g_DRM;
@ -72,7 +86,7 @@ int init_drm(struct drm_t *drm, const char *device, const char *mode_str, unsign
int drm_atomic_commit(struct drm_t *drm, struct Composite_t *pComposite, struct VulkanPipeline_t *pPipeline );
uint32_t drm_fbid_from_dmabuf( struct drm_t *drm, struct wlr_dmabuf_attributes *dma_buf );
void drm_free_fbid( struct drm_t *drm, uint32_t fbid );
bool drm_can_avoid_composite( struct drm_t *drm, struct Composite_t *pComposite );
bool drm_can_avoid_composite( struct drm_t *drm, struct Composite_t *pComposite, struct VulkanPipeline_t *pPipeline );
#ifndef C_SIDE
}

View file

@ -73,7 +73,6 @@ VkBuffer uploadBuffer;
VkDeviceMemory uploadBufferMemory;
void *pUploadBuffer;
bool bUploadCmdBufferIdle;
VkCommandBuffer uploadCommandBuffer;
struct VkPhysicalDeviceMemoryProperties memoryProperties;
@ -120,7 +119,7 @@ struct {
} s_DRMVKFormatTable[] = {
{ DRM_FORMAT_XRGB8888, VK_FORMAT_A8B8G8R8_UNORM_PACK32, true, false },
{ DRM_FORMAT_ARGB8888, VK_FORMAT_A8B8G8R8_UNORM_PACK32, true, true },
{ DRM_FORMAT_RGBA8888, VK_FORMAT_R8G8B8A8_UNORM, false, true },
{ DRM_FORMAT_ARGB8888, VK_FORMAT_R8G8B8A8_UNORM, false, true }, // TODO: figure out why the cursor surface didn't like DRM_FORMAT_RGBA8888
{ DRM_FORMAT_INVALID, VK_FORMAT_UNDEFINED, false, false },
};
@ -320,7 +319,7 @@ bool CVulkanTexture::BInit( uint32_t width, uint32_t height, VkFormat format, bo
{
// We assume we own the memory when doing this right now.
// We could support the import scenario as well if needed
assert( bTextureable == false );
// assert( bTextureable == false );
m_DMA.modifier = DRM_FORMAT_MOD_INVALID;
m_DMA.n_planes = 1;
@ -1034,7 +1033,7 @@ VulkanTexture_t vulkan_create_texture_from_bits( uint32_t width, uint32_t height
CVulkanTexture *pTex = new CVulkanTexture();
if ( pTex->BInit( width, height, format, false, true, nullptr ) == false )
if ( pTex->BInit( width, height, format, true, true, nullptr ) == false )
{
delete pTex;
return ret;
@ -1048,9 +1047,7 @@ VulkanTexture_t vulkan_create_texture_from_bits( uint32_t width, uint32_t height
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
0
};
assert( bUploadCmdBufferIdle == true );
VkResult res = vkResetCommandBuffer( uploadCommandBuffer, 0 );
if ( res != VK_SUCCESS )
@ -1101,7 +1098,12 @@ VulkanTexture_t vulkan_create_texture_from_bits( uint32_t width, uint32_t height
res = vkQueueSubmit(queue, 1, &submitInfo, 0);
bUploadCmdBufferIdle = false;
if ( res != VK_SUCCESS )
{
return false;
}
vkQueueWaitIdle( queue );
ret = ++g_nMaxVulkanTexHandle;
g_mapVulkanTextures[ ret ] = pTex;
@ -1422,8 +1424,6 @@ bool vulkan_composite( struct Composite_t *pComposite, struct VulkanPipeline_t *
vkQueueWaitIdle( queue );
bUploadCmdBufferIdle = true;
if ( BIsNested() == false )
{
g_output.nOutImage = !g_output.nOutImage;
@ -1439,6 +1439,20 @@ uint32_t vulkan_get_last_composite_fbid( void )
return g_output.outputImage[ !g_output.nOutImage ].m_FBID;
}
/*
 * Return the DRM framebuffer ID backing a Vulkan texture handle.
 *
 * A handle of 0 (no texture) yields 0; otherwise the handle must map to a
 * live texture whose m_FBID is non-zero (both enforced by assert).
 *
 * NOTE(review): g_mapVulkanTextures[] via operator[] default-inserts a null
 * entry for an unknown handle before the assert fires (and asserts compile
 * out in release builds) — a find()-based lookup might be safer.
 */
uint32_t vulkan_texture_get_fbid( VulkanTexture_t vulkanTex )
{
if ( vulkanTex == 0 )
return 0;
assert( g_mapVulkanTextures[ vulkanTex ] != nullptr );
uint32_t ret = g_mapVulkanTextures[ vulkanTex ]->m_FBID;
assert( ret != 0 );
return ret;
}

View file

@ -82,6 +82,9 @@ int vulkan_init(void);
VulkanTexture_t vulkan_create_texture_from_dmabuf( struct wlr_dmabuf_attributes *pDMA );
VulkanTexture_t vulkan_create_texture_from_bits( uint32_t width, uint32_t height, VkFormat format, void *bits );
uint32_t vulkan_texture_get_fbid( VulkanTexture_t vulkanTex );
void vulkan_free_texture( VulkanTexture_t vulkanTex );
bool vulkan_composite( struct Composite_t *pComposite, struct VulkanPipeline_t *pPipeline );

View file

@ -491,7 +491,7 @@ paint_cursor ( Display *dpy, win *w, struct Composite_t *pComposite, struct Vulk
pPipeline->layerBindings[ curLayer ].surfaceHeight = cursorHeight;
pPipeline->layerBindings[ curLayer ].tex = cursorTexture;
pPipeline->layerBindings[ curLayer ].fbid = 0;
pPipeline->layerBindings[ curLayer ].fbid = vulkan_texture_get_fbid( cursorTexture );
pPipeline->layerBindings[ curLayer ].bFilter = false;
pPipeline->layerBindings[ curLayer ].bBlackBorder = false;
@ -548,7 +548,7 @@ paint_window (Display *dpy, win *w, struct Composite_t *pComposite, struct Vulka
int curLayer = (int)pComposite->flLayerCount;
pComposite->layers[ curLayer ].flOpacity = (float)w->opacity / OPAQUE;
pComposite->layers[ curLayer ].flOpacity = w->isOverlay ? w->opacity / (float)OPAQUE : 1.0f;
pComposite->layers[ curLayer ].flScaleX = 1.0 / currentScaleRatio;
pComposite->layers[ curLayer ].flScaleY = 1.0 / currentScaleRatio;
@ -790,7 +790,7 @@ paint_all (Display *dpy)
if ( BIsNested() == false )
{
if ( drm_can_avoid_composite( &g_DRM, &composite ) == true )
if ( drm_can_avoid_composite( &g_DRM, &composite, &pipeline ) == true )
{
bDoComposite = false;
}

@ -0,0 +1 @@
Subproject commit cfeee41ec1aa03578bfbe4cd513a25e84c407dec