[v2,04/18] x86/sgx: Use sgx_epc_lru_lists for existing active page list
Commit Message
Replace the existing sgx_active_page_list and its spinlock with
a global sgx_epc_lru_lists struct.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
Cc: Sean Christopherson <seanjc@google.com>
---
arch/x86/kernel/cpu/sgx/main.c | 39 +++++++++++++++++-----------------
1 file changed, 19 insertions(+), 20 deletions(-)
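For context: the diff below relies on struct sgx_epc_lru_lists and helpers
introduced earlier in the series. Based only on the names used here
(sgx_global_lru.lock, sgx_global_lru.reclaimable, sgx_lru_init()) and on the
review comments about the unreclaimable list, the definition presumably looks
roughly like this sketch; the authoritative version lives in the earlier patch:

/*
 * Sketch only: field names inferred from the diff and review below;
 * the real definition comes from an earlier patch in the series.
 */
struct sgx_epc_lru_lists {
	spinlock_t lock;
	struct list_head reclaimable;
	struct list_head unreclaimable;	/* not yet used by this patch */
};

static inline void sgx_lru_init(struct sgx_epc_lru_lists *lrus)
{
	spin_lock_init(&lrus->lock);
	INIT_LIST_HEAD(&lrus->reclaimable);
	INIT_LIST_HEAD(&lrus->unreclaimable);
}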
Comments
On 12/2/22 10:36, Kristen Carlson Accardi wrote:
> + spin_lock(&sgx_global_lru.lock);
> for (i = 0; i < SGX_NR_TO_SCAN; i++) {
> - if (list_empty(&sgx_active_page_list))
> + epc_page = sgx_epc_pop_reclaimable(&sgx_global_lru);
> + if (!epc_page)
> break;
One other nit about the structure of the patches: This introduced *both*
reclaimable and unreclaimable list_heads. But, it has zero use for the
unreclaimable ones during the refactoring here. I probably would have
left out the 'unreclaimable' bits for now.
BTW, this is a nice sign:
> arch/x86/kernel/cpu/sgx/main.c | 39 +++++++++++++++++-----------------
> 1 file changed, 19 insertions(+), 20 deletions(-)
On Fri, 2022-12-02 at 13:43 -0800, Dave Hansen wrote:
> On 12/2/22 10:36, Kristen Carlson Accardi wrote:
> > + spin_lock(&sgx_global_lru.lock);
> > for (i = 0; i < SGX_NR_TO_SCAN; i++) {
> > - if (list_empty(&sgx_active_page_list))
> > + epc_page = sgx_epc_pop_reclaimable(&sgx_global_lru);
> > + if (!epc_page)
> > break;
>
> One other nit about the structure of the patches: This introduced *both*
> reclaimable and unreclaimable list_heads. But, it has zero use for the
> unreclaimable ones during the refactoring here. I probably would have
> left out the 'unreclaimable' bits for now.
I know - originally the unreclaimable list was added later, but when I
posted the RFC there seemed to be some misunderstanding about what this
data structure was and how it would be used, precisely because the
unreclaimable bits only showed up later in the series. So I put both
lists in from the start so it'd give a better view of what the data
structure will eventually look like.
>
> BTW, this is a nice sign:
>
> > arch/x86/kernel/cpu/sgx/main.c | 39 +++++++++++++++++-----------------
> > 1 file changed, 19 insertions(+), 20 deletions(-)
>
>
On 12/2/22 13:51, Kristen Carlson Accardi wrote:
> I know - and originally the addition of unreclaimable was added later,
> but when I posted the RFC I felt there was some misunderstanding about
> what this data structure was and how it would be used because the
> addition of the unreclaimable bits came later. So I stuck both lists in
> one so it'd be a better view of what the data structure would look
> like.
You're not insane for thinking that.
But, it's really OK to introduce an abstraction that *looks* silly on
its face at first. You can easily just make up for it by saying:
struct silly_abstraction {
	struct list_head list;
};
Oh, boy does my structure look silly. It's a structure with a
single list_head. Why oh why would I do something silly like
that? Well, for now, the code has but one list. Soon, I'll
add a whole smorgasbord of lists. Bear with me for now.
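Put differently, the transitional patch could carry something as small as the
following (a hypothetical sketch of that suggestion, not code from this
series), and grow the extra lists only in the later patches that need them:

/* Hypothetical minimal form: one lock, one list, nothing else yet. */
struct sgx_epc_lru_lists {
	spinlock_t lock;
	struct list_head reclaimable;
	/* Later patches add the remaining lists (e.g. unreclaimable). */
};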
@@ -26,10 +26,9 @@ static DEFINE_XARRAY(sgx_epc_address_space);
/*
* These variables are part of the state of the reclaimer, and must be accessed
- * with sgx_reclaimer_lock acquired.
+ * with sgx_global_lru.lock acquired.
*/
-static LIST_HEAD(sgx_active_page_list);
-static DEFINE_SPINLOCK(sgx_reclaimer_lock);
+static struct sgx_epc_lru_lists sgx_global_lru;
static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
@@ -298,14 +297,12 @@ static void __sgx_reclaim_pages(void)
int ret;
int i;
- spin_lock(&sgx_reclaimer_lock);
+ spin_lock(&sgx_global_lru.lock);
for (i = 0; i < SGX_NR_TO_SCAN; i++) {
- if (list_empty(&sgx_active_page_list))
+ epc_page = sgx_epc_pop_reclaimable(&sgx_global_lru);
+ if (!epc_page)
break;
- epc_page = list_first_entry(&sgx_active_page_list,
- struct sgx_epc_page, list);
- list_del_init(&epc_page->list);
encl_page = epc_page->encl_owner;
if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
@@ -316,7 +313,7 @@ static void __sgx_reclaim_pages(void)
*/
epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
}
- spin_unlock(&sgx_reclaimer_lock);
+ spin_unlock(&sgx_global_lru.lock);
for (i = 0; i < cnt; i++) {
epc_page = chunk[i];
@@ -339,9 +336,9 @@ static void __sgx_reclaim_pages(void)
continue;
skip:
- spin_lock(&sgx_reclaimer_lock);
- list_add_tail(&epc_page->list, &sgx_active_page_list);
- spin_unlock(&sgx_reclaimer_lock);
+ spin_lock(&sgx_global_lru.lock);
+ sgx_epc_push_reclaimable(&sgx_global_lru, epc_page);
+ spin_unlock(&sgx_global_lru.lock);
kref_put(&encl_page->encl->refcount, sgx_encl_release);
@@ -378,7 +375,7 @@ static void sgx_reclaim_pages(void)
static bool sgx_should_reclaim(unsigned long watermark)
{
return atomic_long_read(&sgx_nr_free_pages) < watermark &&
- !list_empty(&sgx_active_page_list);
+ !list_empty(&sgx_global_lru.reclaimable);
}
/*
@@ -433,6 +430,8 @@ static bool __init sgx_page_reclaimer_init(void)
ksgxd_tsk = tsk;
+ sgx_lru_init(&sgx_global_lru);
+
return true;
}
@@ -508,10 +507,10 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
*/
void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
{
- spin_lock(&sgx_reclaimer_lock);
+ spin_lock(&sgx_global_lru.lock);
page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
- list_add_tail(&page->list, &sgx_active_page_list);
- spin_unlock(&sgx_reclaimer_lock);
+ sgx_epc_push_reclaimable(&sgx_global_lru, page);
+ spin_unlock(&sgx_global_lru.lock);
}
/**
@@ -526,18 +525,18 @@ void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
*/
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
- spin_lock(&sgx_reclaimer_lock);
+ spin_lock(&sgx_global_lru.lock);
if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
/* The page is being reclaimed. */
if (list_empty(&page->list)) {
- spin_unlock(&sgx_reclaimer_lock);
+ spin_unlock(&sgx_global_lru.lock);
return -EBUSY;
}
list_del(&page->list);
page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
}
- spin_unlock(&sgx_reclaimer_lock);
+ spin_unlock(&sgx_global_lru.lock);
return 0;
}
@@ -574,7 +573,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
break;
}
- if (list_empty(&sgx_active_page_list))
+ if (list_empty(&sgx_global_lru.reclaimable))
return ERR_PTR(-ENOMEM);
if (!reclaim) {
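For completeness, sgx_epc_pop_reclaimable() and sgx_epc_push_reclaimable()
are defined elsewhere in the series; judging from the open-coded list
operations they replace above, they are presumably thin wrappers along these
lines (a sketch; callers are expected to hold lrus->lock, as the diff does):

/* Sketch of the helpers assumed above; callers hold lrus->lock. */
static inline struct sgx_epc_page *
sgx_epc_pop_reclaimable(struct sgx_epc_lru_lists *lrus)
{
	struct sgx_epc_page *page;

	if (list_empty(&lrus->reclaimable))
		return NULL;

	page = list_first_entry(&lrus->reclaimable, struct sgx_epc_page, list);
	list_del_init(&page->list);

	return page;
}

static inline void sgx_epc_push_reclaimable(struct sgx_epc_lru_lists *lrus,
					    struct sgx_epc_page *page)
{
	list_add_tail(&page->list, &lrus->reclaimable);
}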