@@ -1764,24 +1764,21 @@ static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
 
 static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
 {
-	const struct btrfs_space_info *space_info = bg->space_info;
-	const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
+	const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info);
+	u64 thresh_bytes = mult_perc(bg->length, thresh_pct);
 	const u64 new_val = bg->used;
 	const u64 old_val = new_val + bytes_freed;
-	u64 thresh;
 
-	if (reclaim_thresh == 0)
+	if (thresh_bytes == 0)
 		return false;
 
-	thresh = mult_perc(bg->length, reclaim_thresh);
-
 	/*
 	 * If we were below the threshold before don't reclaim, we are likely a
 	 * brand new block group and we don't want to relocate new block groups.
 	 */
-	if (old_val < thresh)
+	if (old_val < thresh_bytes)
 		return false;
-	if (new_val >= thresh)
+	if (new_val >= thresh_bytes)
 		return false;
 	return true;
 }
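
The hunk above only changes where the threshold percentage comes from: it is now returned by btrfs_calc_reclaim_threshold() (defined on the space-info side of the series) and converted to bytes up front, instead of reading space_info->bg_reclaim_threshold directly. For a fixed percentage the decision itself is unchanged. A minimal userspace model of that decision, assuming mult_perc(x, pct) behaves as x * pct / 100:

/*
 * Userspace model of the decision above, for a fixed threshold
 * percentage. mult_perc(x, pct) is modelled as x * pct / 100.
 * Build with: gcc -o reclaim-demo reclaim-demo.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool should_reclaim(uint64_t length, uint64_t used,
			   uint64_t bytes_freed, int thresh_pct)
{
	uint64_t thresh_bytes = length * thresh_pct / 100;
	uint64_t new_val = used;
	uint64_t old_val = new_val + bytes_freed;

	if (thresh_bytes == 0)
		return false;
	/* Was below the threshold before the free: likely a new block group. */
	if (old_val < thresh_bytes)
		return false;
	/* Still at or above the threshold: not worth relocating yet. */
	if (new_val >= thresh_bytes)
		return false;
	return true;
}

int main(void)
{
	uint64_t len = 1ULL << 30;	/* 1 GiB block group */

	/* Freeing from 80% used down to 70% with a 75% threshold: reclaim. */
	printf("%d\n", should_reclaim(len, len / 100 * 70, len / 100 * 10, 75));
	return 0;
}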
@@ -1828,6 +1825,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 	list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
 	while (!list_empty(&fs_info->reclaim_bgs)) {
 		u64 zone_unusable;
+		u64 reclaimed;
 		int ret = 0;
 
 		bg = list_first_entry(&fs_info->reclaim_bgs,
@@ -1841,6 +1839,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 		/* Don't race with allocators so take the groups_sem */
 		down_write(&space_info->groups_sem);
 
+		spin_lock(&space_info->lock);
 		spin_lock(&bg->lock);
 		if (bg->reserved || bg->pinned || bg->ro) {
 			/*
@@ -1850,6 +1849,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 			 * this block group.
 			 */
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 		}
@@ -1868,6 +1868,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 			if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
 				btrfs_mark_bg_unused(bg);
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 
@@ -1884,10 +1885,12 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 		 */
 		if (!should_reclaim_block_group(bg, bg->length)) {
 			spin_unlock(&bg->lock);
+			spin_unlock(&space_info->lock);
 			up_write(&space_info->groups_sem);
 			goto next;
 		}
 		spin_unlock(&bg->lock);
+		spin_unlock(&space_info->lock);
 
 		/*
 		 * Get out fast, in case we're read-only or unmounting the
@@ -1920,15 +1923,26 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 				div64_u64(bg->used * 100, bg->length),
 				div64_u64(zone_unusable * 100, bg->length));
 		trace_btrfs_reclaim_block_group(bg);
+		reclaimed = bg->used;
 		ret = btrfs_relocate_chunk(fs_info, bg->start);
 		if (ret) {
 			btrfs_dec_block_group_ro(bg);
 			btrfs_err(fs_info, "error relocating chunk %llu",
 				  bg->start);
+			spin_lock(&space_info->lock);
+			space_info->reclaim_count++;
+			if (READ_ONCE(space_info->periodic_reclaim))
+				space_info->periodic_reclaim_ready = false;
+			spin_unlock(&space_info->lock);
+		} else {
+			spin_lock(&space_info->lock);
+			space_info->reclaim_count++;
+			space_info->reclaim_bytes += reclaimed;
+			spin_unlock(&space_info->lock);
 		}
 
next:
-		if (ret)
+		if (ret && !READ_ONCE(space_info->periodic_reclaim))
 			btrfs_mark_bg_to_reclaim(bg);
 		btrfs_put_block_group(bg);
 
@@ -1955,6 +1969,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 
 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
 {
+	btrfs_reclaim_sweep(fs_info);
 	spin_lock(&fs_info->unused_bgs_lock);
 	if (!list_empty(&fs_info->reclaim_bgs))
 		queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
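
btrfs_reclaim_sweep() is only called here; its definition is not part of this file's hunks. A hypothetical sketch of its shape, assuming it walks the filesystem's space_infos and consumes the "ready" flag for each one doing periodic reclaim: periodic_reclaim and periodic_reclaim_ready appear in the hunks above, while the list walk and the queueing step are assumptions, not the actual implementation.

/*
 * Hypothetical sketch only: btrfs_reclaim_sweep() is defined in the
 * space-info.c part of this series and is not shown in this diff.
 */
void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	list_for_each_entry(space_info, &fs_info->space_info, list) {
		if (!READ_ONCE(space_info->periodic_reclaim))
			continue;

		spin_lock(&space_info->lock);
		if (!space_info->periodic_reclaim_ready) {
			spin_unlock(&space_info->lock);
			continue;
		}
		/* Consume the ready flag; a relocation error also clears it (see above). */
		space_info->periodic_reclaim_ready = false;
		spin_unlock(&space_info->lock);

		/*
		 * Walk this space_info's block groups and mark the ones over
		 * the reclaim threshold (details omitted in this sketch).
		 */
	}
}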
@@ -3653,9 +3668,12 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
 			old_val += num_bytes;
 			cache->used = old_val;
 			cache->reserved -= num_bytes;
+			cache->reclaim_mark = 0;
 			space_info->bytes_reserved -= num_bytes;
 			space_info->bytes_used += num_bytes;
 			space_info->disk_used += num_bytes * factor;
+			if (READ_ONCE(space_info->periodic_reclaim))
+				btrfs_space_info_update_reclaimable(space_info, -num_bytes);
 			spin_unlock(&cache->lock);
 			spin_unlock(&space_info->lock);
 		} else {
@@ -3665,8 +3683,10 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
 			btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
 			space_info->bytes_used -= num_bytes;
 			space_info->disk_used -= num_bytes * factor;
-
-			reclaim = should_reclaim_block_group(cache, num_bytes);
+			if (READ_ONCE(space_info->periodic_reclaim))
+				btrfs_space_info_update_reclaimable(space_info, num_bytes);
+			else
+				reclaim = should_reclaim_block_group(cache, num_bytes);
 
 			spin_unlock(&cache->lock);
 			spin_unlock(&space_info->lock);
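
Both call sites above pass signed deltas to btrfs_space_info_update_reclaimable() while holding space_info->lock: -num_bytes on allocation, +num_bytes on free (a fresh allocation also resets cache->reclaim_mark). The helper itself is defined elsewhere in the series; a hypothetical sketch, where the reclaimable_bytes counter and the readiness condition are assumptions and only the call sites and sign convention come from this diff:

/*
 * Hypothetical sketch: the real btrfs_space_info_update_reclaimable() is
 * added on the space-info.c side of the series.
 */
void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info,
					 s64 bytes)
{
	lockdep_assert_held(&space_info->lock);

	space_info->reclaimable_bytes += bytes;
	/*
	 * Once enough space has been freed since the last sweep, allow the
	 * next btrfs_reclaim_sweep() pass to queue block groups again.
	 */
	if (space_info->reclaimable_bytes >= SZ_1G)
		space_info->periodic_reclaim_ready = true;
}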