Dedupe and memoize fb_id and vulkan texture per buffer.

Also split lock/unlock from import/drop in drm, because we still
need the lock/unlock semantics to signal to the app when it can
reuse the buffer.
This commit is contained in:
Bas Nieuwenhuizen 2021-10-18 22:14:33 +02:00 committed by Pierre-Loup A. Griffais
parent 5fc9df512c
commit 7e93200e08
3 changed files with 103 additions and 37 deletions

View file

@@ -179,7 +179,7 @@ static struct plane *find_primary_plane(struct drm_t *drm)
return primary;
}
static void drm_free_fb( struct drm_t *drm, struct fb *fb );
static void drm_unlock_fb_internal( struct drm_t *drm, struct fb *fb );
static void page_flip_handler(int fd, unsigned int frame, unsigned int sec, unsigned int usec, unsigned int crtc_id, void *data)
{
@@ -210,13 +210,26 @@ static void page_flip_handler(int fd, unsigned int frame, unsigned int sec, unsi
// we flipped away from this previous fbid, now safe to delete
std::lock_guard<std::mutex> lock( g_DRM.free_queue_lock );
for ( uint32_t i = 0; i < g_DRM.fbid_unlock_queue.size(); i++ )
{
if ( g_DRM.fbid_unlock_queue[ i ] == previous_fbid )
{
drm_verbose_log.debugf("deferred unlock %u", previous_fbid);
drm_unlock_fb_internal( &g_DRM, &get_fb( g_DRM, previous_fbid ) );
g_DRM.fbid_unlock_queue.erase( g_DRM.fbid_unlock_queue.begin() + i );
break;
}
}
for ( uint32_t i = 0; i < g_DRM.fbid_free_queue.size(); i++ )
{
if ( g_DRM.fbid_free_queue[ i ] == previous_fbid )
{
drm_verbose_log.debugf("deferred free %u", previous_fbid);
drm_verbose_log.debugf( "deferred free %u", previous_fbid );
drm_free_fb( &g_DRM, &previous_fb );
drm_drop_fbid( &g_DRM, previous_fbid );
g_DRM.fbid_free_queue.erase( g_DRM.fbid_free_queue.begin() + i );
break;
@@ -899,20 +912,13 @@ uint32_t drm_fbid_from_dmabuf( struct drm_t *drm, struct wlr_buffer *buf, struct
drm_verbose_log.debugf("make fbid %u", fb_id);
if ( buf != nullptr )
{
wlserver_lock();
buf = wlr_buffer_lock( buf );
wlserver_unlock();
}
/* Nested scope so fb doesn't end up in the out: label */
{
struct fb &fb = get_fb( *drm, fb_id );
assert( fb.held == false );
fb.id = fb_id;
fb.buf = buf;
fb.held = true;
fb.held = !buf;
fb.n_refs = 0;
}
@@ -941,15 +947,32 @@ out:
return fb_id;
}
static void drm_free_fb( struct drm_t *drm, struct fb *fb )
void drm_drop_fbid( struct drm_t *drm, uint32_t fbid )
{
assert( !fb->held );
assert( fb->n_refs == 0 );
struct fb &fb = get_fb( *drm, fbid );
assert( fb.held == false ||
fb.buf == nullptr );
if ( drmModeRmFB( drm->fd, fb->id ) != 0 )
fb.held = false;
if ( fb.n_refs != 0 )
{
std::lock_guard<std::mutex> lock( drm->free_queue_lock );
drm->fbid_free_queue.push_back( fbid );
return;
}
if (drmModeRmFB( drm->fd, fbid ) != 0 )
{
drm_log.errorf_errno( "drmModeRmFB failed" );
}
}
static void drm_unlock_fb_internal( struct drm_t *drm, struct fb *fb )
{
assert( !fb->held );
assert( fb->n_refs == 0 );
if ( fb->buf != nullptr )
{
@@ -957,11 +980,24 @@ static void drm_free_fb( struct drm_t *drm, struct fb *fb )
wlr_buffer_unlock( fb->buf );
wlserver_unlock();
}
fb = {};
}
void drm_drop_fbid( struct drm_t *drm, uint32_t fbid )
// Take a hold on an imported framebuffer before it is queued for scanout.
// Marks the fb as held and, when the fb wraps a wlr_buffer, also takes a
// wlr_buffer lock so the client cannot reuse the buffer until
// drm_unlock_fbid() releases it.
void drm_lock_fbid( struct drm_t *drm, uint32_t fbid )
{
struct fb &fb = get_fb( *drm, fbid );
// The fb must be idle: not already held and not referenced by any
// in-flight page-flip.
assert( fb.held == false );
assert( fb.n_refs == 0 );
fb.held = true;
if ( fb.buf != nullptr )
{
// wlr_buffer calls are bracketed by wlserver_lock()/wlserver_unlock(),
// matching the locking pattern used everywhere else in this file.
wlserver_lock();
wlr_buffer_lock( fb.buf );
wlserver_unlock();
}
}
void drm_unlock_fbid( struct drm_t *drm, uint32_t fbid )
{
struct fb &fb = get_fb( *drm, fbid );
@@ -972,13 +1008,13 @@ void drm_drop_fbid( struct drm_t *drm, uint32_t fbid )
{
/* FB isn't being used in any page-flip, free it immediately */
drm_verbose_log.debugf("free fbid %u", fbid);
drm_free_fb( drm, &fb );
drm_unlock_fb_internal( drm, &fb );
}
else
{
std::lock_guard<std::mutex> lock( drm->free_queue_lock );
drm->fbid_free_queue.push_back( fbid );
drm->fbid_unlock_queue.push_back( fbid );
}
}

View file

@@ -106,6 +106,7 @@ struct drm_t {
std::mutex fb_map_mutex;
std::mutex free_queue_lock;
std::vector< uint32_t > fbid_unlock_queue;
std::vector< uint32_t > fbid_free_queue;
std::mutex flip_lock;
@@ -141,6 +142,8 @@ int drm_commit(struct drm_t *drm, struct Composite_t *pComposite, struct VulkanP
int drm_prepare( struct drm_t *drm, const struct Composite_t *pComposite, const struct VulkanPipeline_t *pPipeline );
bool drm_poll_state(struct drm_t *drm);
uint32_t drm_fbid_from_dmabuf( struct drm_t *drm, struct wlr_buffer *buf, struct wlr_dmabuf_attributes *dma_buf );
void drm_lock_fbid( struct drm_t *drm, uint32_t fbid );
void drm_unlock_fbid( struct drm_t *drm, uint32_t fbid );
void drm_drop_fbid( struct drm_t *drm, uint32_t fbid );
bool drm_set_connector( struct drm_t *drm, struct connector *conn );
bool drm_set_mode( struct drm_t *drm, const drmModeModeInfo *mode );

View file

@@ -303,6 +303,8 @@ std::vector<ResListEntry_t> wayland_commit_queue;
// Per-wlr_buffer cache entry: memoizes the Vulkan texture and DRM fb_id
// created for a buffer so repeated commits of the same buffer reuse them
// instead of re-importing (see import_commit). The destroy listener
// (destroy_buffer) drops the fb_id and frees the texture when the
// underlying wlr_buffer goes away.
struct wlr_buffer_map_entry {
struct wl_listener listener;
struct wlr_buffer *buf;
VulkanTexture_t vulkanTex;
uint32_t fb_id;
};
static std::mutex wlr_buffer_map_lock;
@@ -626,6 +628,16 @@ destroy_buffer( struct wl_listener *listener, void * )
std::lock_guard<std::mutex> lock( wlr_buffer_map_lock );
wlr_buffer_map_entry *entry = wl_container_of( listener, entry, listener );
if ( entry->fb_id != 0 )
{
drm_drop_fbid( &g_DRM, entry->fb_id );
}
if ( entry->vulkanTex != 0 )
{
vulkan_free_texture( entry->vulkanTex );
}
wl_list_remove( &entry->listener.link );
/* Has to be the last thing we do as this deletes *entry. */
@@ -637,16 +649,10 @@ release_commit( commit_t &commit )
{
if ( commit.fb_id != 0 )
{
drm_drop_fbid( &g_DRM, commit.fb_id );
drm_unlock_fbid( &g_DRM, commit.fb_id );
commit.fb_id = 0;
}
if ( commit.vulkanTex != 0 )
{
vulkan_free_texture( commit.vulkanTex );
commit.vulkanTex = 0;
}
wlserver_lock();
wlr_buffer_unlock( commit.buf );
wlserver_unlock();
@@ -655,10 +661,27 @@ release_commit( commit_t &commit )
static bool
import_commit ( struct wlr_buffer *buf, commit_t &commit )
{
/* Keep this locked during the entire function so we
* avoid racing the creation of the commit. Unlikely to
* come up with the current callers but a safe default. */
std::lock_guard<std::mutex> lock( wlr_buffer_map_lock );
commit.buf = buf;
auto it = wlr_buffer_map.find( buf );
if ( it != wlr_buffer_map.end() )
{
commit.vulkanTex = it->second.vulkanTex;
commit.fb_id = it->second.fb_id;
if (commit.fb_id)
{
drm_lock_fbid( &g_DRM, commit.fb_id );
}
return true;
}
commit.vulkanTex = vulkan_create_texture_from_wlr_buffer( buf );
assert( commit.vulkanTex != 0 );
@@ -666,23 +689,27 @@ import_commit ( struct wlr_buffer *buf, commit_t &commit )
if ( BIsNested() == false && wlr_buffer_get_dmabuf( buf, &dmabuf ) )
{
commit.fb_id = drm_fbid_from_dmabuf( &g_DRM, buf, &dmabuf );
if ( commit.fb_id )
{
drm_lock_fbid( &g_DRM, commit.fb_id );
}
}
else
{
commit.fb_id = 0;
}
auto it = wlr_buffer_map.find( buf );
if ( it == wlr_buffer_map.end() )
{
wlr_buffer_map_entry& entry = wlr_buffer_map[buf];
entry.listener.notify = destroy_buffer;
entry.buf = buf;
wlr_buffer_map_entry& entry = wlr_buffer_map[buf];
entry.listener.notify = destroy_buffer;
entry.buf = buf;
entry.vulkanTex = commit.vulkanTex;
entry.fb_id = commit.fb_id;
wlserver_lock();
wl_signal_add( &buf->events.destroy, &entry.listener );
wlserver_unlock();
wlserver_lock();
wl_signal_add( &buf->events.destroy, &entry.listener );
wlserver_unlock();
}
return true;
}