The current attempted split between xe/i915 vs. display for
intel_frontbuffer is a mess:
- the i915 rcu leaks through the interface to the display side
- the obj->frontbuffer write-side is now protected by a display-specific
  spinlock even though the actual obj->frontbuffer pointer lives in an
  i915-specific structure
- the kref is getting poked directly from both sides
- i915_active is still on the display side

Clean up the mess by moving everything about frontbuffer lifetime
management to the i915/xe side:
- the rcu usage is now completely contained in i915
- frontbuffer_lock is moved into i915
- the kref is on the i915/xe side (xe needs the refcount as well due to
  intel_frontbuffer_queue_flush()->intel_frontbuffer_ref())
- the bo (and its refcounting) is no longer on the display side
- i915_active is contained in i915

I was pondering whether we could do this in smaller steps, and perhaps
we could, but it would probably have to start with a bunch of reverts
(which for sure won't apply cleanly anymore), so I'm not convinced it's
worth the hassle.

Acked-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patch.msgid.link/20251016185408.22735-10-ville.syrjala@linux.intel.com
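As an illustration of the resulting interface, here is a minimal, hypothetical sketch of how a display-side caller could drive the xe-side lifetime hooks defined in the file below. Only intel_bo_frontbuffer_get(), intel_bo_frontbuffer_ref() and intel_bo_frontbuffer_put() come from this file; the wrapper function, its name and the error handling are illustrative assumptions, not actual display code.

/* Hypothetical usage sketch -- not part of intel_bo.c or the display code. */
static int example_frontbuffer_lifetime(struct drm_gem_object *obj)
{
	struct intel_frontbuffer *front;

	/* Allocates the xe_frontbuffer wrapper and takes a bo reference (kref == 1). */
	front = intel_bo_frontbuffer_get(obj);
	if (!front)
		return -ENOMEM;

	/* Extra reference, e.g. while a flush is queued. */
	intel_bo_frontbuffer_ref(front);

	/* ...the queued flush completes... */
	intel_bo_frontbuffer_put(front);

	/* Final put: intel_frontbuffer_fini(), bo reference dropped, kfree(). */
	intel_bo_frontbuffer_put(front);

	return 0;
}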
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */

#include <drm/drm_gem.h>

#include "xe_bo.h"
#include "intel_bo.h"
#include "intel_frontbuffer.h"

bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
	/* legacy tiling is unused */
	return false;
}

bool intel_bo_is_userptr(struct drm_gem_object *obj)
{
	/* xe does not have userptr bos */
	return false;
}

bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
	return false;
}

bool intel_bo_is_protected(struct drm_gem_object *obj)
{
	return xe_bo_is_protected(gem_to_xe_bo(obj));
}

int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	return drm_gem_prime_mmap(obj, vma);
}

int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	return xe_bo_read(bo, offset, dst, size);
}

struct xe_frontbuffer {
	struct intel_frontbuffer base;
	struct drm_gem_object *obj;
	struct kref ref;
};

struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj)
{
	struct xe_frontbuffer *front;

	front = kmalloc(sizeof(*front), GFP_KERNEL);
	if (!front)
		return NULL;

	intel_frontbuffer_init(&front->base, obj->dev);

	kref_init(&front->ref);

	drm_gem_object_get(obj);
	front->obj = obj;

	return &front->base;
}

void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
{
	struct xe_frontbuffer *front =
		container_of(_front, typeof(*front), base);

	kref_get(&front->ref);
}

static void frontbuffer_release(struct kref *ref)
{
	struct xe_frontbuffer *front =
		container_of(ref, typeof(*front), ref);

	intel_frontbuffer_fini(&front->base);

	drm_gem_object_put(front->obj);

	kfree(front);
}

void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
{
	struct xe_frontbuffer *front =
		container_of(_front, typeof(*front), base);

	kref_put(&front->ref, frontbuffer_release);
}

void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front)
{
}

void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
	/* FIXME */
}