fs/spinlock: replace with the no-sched_lock() variant to improve performance
After the change below was merged into the kernel, spin_lock() turns off preemption by
default, but that behavior is not appropriate for every scenario. The places in the kernel
that use spin_lock() extensively need only short critical sections and do not trigger
scheduling, so the extra sched_lock() causes serious performance degradation of NuttX in AMP mode.

In this PR, I try to expose similar problems, and I hope that each subsystem will carefully check its code for the same pattern.

#14578
|commit b69111d
|Author: hujun5 <[email protected]>
|Date:   Thu Jan 23 16:14:18 2025 +0800
|
|    spinlock: add sched_lock to spin_lock_irqsave
|
|    reason:
|    We aim to replace big locks with smaller ones, so we will use spin_lock_irqsave extensively in
|    place of enter_critical_section in subsequent work. Following the Linux implementation, we add
|    sched_lock to spin_lock_irqsave in order to address scenarios where sem_post occurs while the
|    spinlock is held, which can otherwise lead to spinlock failures and deadlocks.
|
|    Signed-off-by: hujun5 <[email protected]>

Signed-off-by: chao an <[email protected]>
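
For reference, here is a minimal sketch of the difference between the two variants as
described above. This is an illustrative assumption about the structure, not the actual
NuttX implementation (which also handles instrumentation and SMP details):

/* Illustrative sketch only -- not the real NuttX code.  It captures the
 * behavioral difference described in the commit messages above.
 */

irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
{
  irqstate_t flags = up_irq_save(); /* Disable interrupts on this CPU */

  sched_lock();                     /* Since commit b69111d: also disable
                                     * preemption, so a wakeup (e.g. via
                                     * sem_post) cannot switch away while
                                     * the spinlock is held */
  spin_lock(lock);
  return flags;
}

irqstate_t raw_spin_lock_irqsave(FAR volatile spinlock_t *lock)
{
  irqstate_t flags = up_irq_save(); /* Disable interrupts only */

  spin_lock(lock);                  /* No sched_lock(): cheaper, but safe
                                     * only for short critical sections
                                     * that never trigger scheduling */
  return flags;
}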
anchao authored and xiaoxiang781216 committed Jan 24, 2025
1 parent b9e995b commit ace7f0c
Showing 1 changed file with 9 additions and 9 deletions.
18 changes: 9 additions & 9 deletions fs/inode/fs_files.c
@@ -72,9 +72,9 @@ static FAR struct file *files_fget_by_index(FAR struct filelist *list,
FAR struct file *filep;
irqstate_t flags;

-  flags = spin_lock_irqsave(&list->fl_lock);
+  flags = raw_spin_lock_irqsave(&list->fl_lock);
filep = &list->fl_files[l1][l2];
-  spin_unlock_irqrestore(&list->fl_lock, flags);
+  raw_spin_unlock_irqrestore(&list->fl_lock, flags);

#ifdef CONFIG_FS_REFCOUNT
if (filep->f_inode != NULL)
@@ -164,7 +164,7 @@ static int files_extend(FAR struct filelist *list, size_t row)
}
while (++i < row);

-  flags = spin_lock_irqsave(&list->fl_lock);
+  flags = raw_spin_lock_irqsave(&list->fl_lock);

/* To avoid race condition, if the file list is updated by other threads
* and list rows is greater or equal than temp list,
@@ -173,7 +173,7 @@ static int files_extend(FAR struct filelist *list, size_t row)

if (orig_rows != list->fl_rows && list->fl_rows >= row)
{
-      spin_unlock_irqrestore(&list->fl_lock, flags);
+      raw_spin_unlock_irqrestore(&list->fl_lock, flags);

for (j = orig_rows; j < i; j++)
{
@@ -195,7 +195,7 @@ static int files_extend(FAR struct filelist *list, size_t row)
list->fl_files = files;
list->fl_rows = row;

-  spin_unlock_irqrestore(&list->fl_lock, flags);
+  raw_spin_unlock_irqrestore(&list->fl_lock, flags);

if (tmp != NULL && tmp != &list->fl_prefile)
{
@@ -565,21 +565,21 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode,

/* Find free file */

-  flags = spin_lock_irqsave(&list->fl_lock);
+  flags = raw_spin_lock_irqsave(&list->fl_lock);

for (; ; i++, j = 0)
{
if (i >= list->fl_rows)
{
-          spin_unlock_irqrestore(&list->fl_lock, flags);
+          raw_spin_unlock_irqrestore(&list->fl_lock, flags);

ret = files_extend(list, i + 1);
if (ret < 0)
{
return ret;
}

-          flags = spin_lock_irqsave(&list->fl_lock);
+          flags = raw_spin_lock_irqsave(&list->fl_lock);
}

do
@@ -608,7 +608,7 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode,
}

found:
-  spin_unlock_irqrestore(&list->fl_lock, flags);
+  raw_spin_unlock_irqrestore(&list->fl_lock, flags);

if (addref)
{
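The call sites changed above all follow the pattern that makes the raw variant safe.
A hypothetical caller sketch, not code taken from this patch:

/* Hypothetical usage sketch, assuming the illustrative variants above:
 * the raw form is appropriate here because the critical section is a
 * plain table access that never blocks or wakes another thread.  Code
 * that may call sem_post() while holding the lock still needs the
 * sched_lock() variant from commit b69111d.
 */

irqstate_t flags = raw_spin_lock_irqsave(&list->fl_lock);
filep = &list->fl_files[l1][l2];  /* Short, non-blocking critical section */
raw_spin_unlock_irqrestore(&list->fl_lock, flags);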
