From d63218cbfced147a9171e646c132fdeeff9d0a2f Mon Sep 17 00:00:00 2001
From: psndna88
Date: Sun, 9 Jun 2024 02:07:46 +0530
Subject: [PATCH] Implement YAKT v17 tweaks in kernel sources

Credits to: @NotZeetaa, RedHat, tytydraco, KTweak, kdrag0n, Arter97, @darkhz
---
 kernel/sched/autogroup.c         | 2 +-
 kernel/sched/cpufreq_schedutil.c | 2 +-
 kernel/sched/fair.c              | 8 ++++----
 kernel/sysctl.c                  | 2 +-
 mm/page_alloc.c                  | 2 +-
 mm/vmscan.c                      | 4 ++--
 mm/vmstat.c                      | 2 +-
 net/ipv4/tcp_ipv4.c              | 2 +-
 8 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index 2067080bb2358..4f439cd95eece 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -5,7 +5,7 @@
 #include <linux/nospec.h>
 #include "sched.h"
 
-unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 0;
 
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 0ebd1c824ca93..1f93345716548 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -1235,7 +1235,7 @@ static int sugov_init(struct cpufreq_policy *policy)
 	tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
 	tunables->down_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
 
-	tunables->up_rate_limit_us = 500;
+	tunables->up_rate_limit_us = 10000;
 	tunables->down_rate_limit_us = 20000;
 	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
 	tunables->hispeed_freq = 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cff2baafe4892..16e6431a66987 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -77,7 +77,7 @@ static unsigned int sched_nr_latency = 8;
  * After fork, child runs first. If set to 0 (default) then
  * parent will (try to) run first.
  */
-unsigned int sysctl_sched_child_runs_first __read_mostly = 0;
+unsigned int sysctl_sched_child_runs_first __read_mostly = 1;
 
 /*
  * SCHED_OTHER wake-up granularity.
@@ -88,10 +88,10 @@ unsigned int sysctl_sched_child_runs_first __read_mostly = 0;
  *
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_wakeup_granularity = 3000000UL;
-static unsigned int normalized_sysctl_sched_wakeup_granularity = 3000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 1500000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 1500000UL;
 
-const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+const_debug unsigned int sysctl_sched_migration_cost = 50000UL;
 
 DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
 
 /*
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3637c7f8578ea..1570c582f17f6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -363,7 +363,7 @@ static struct ctl_table sysctl_base_table[] = {
 	{ }
 };
 
-static int min_sched_granularity_ns = 100000;		/* 100 usecs */
+static int min_sched_granularity_ns = 1000000;		/* 1 msec */
 static int max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
 static int min_wakeup_granularity_ns;			/* 0 usecs */
 static int max_wakeup_granularity_ns = NSEC_PER_SEC;	/* 1 second */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ba2d2bc9e09c7..e761f1718143a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -337,7 +337,7 @@ int user_min_free_kbytes = -1;
  */
 int watermark_boost_factor __read_mostly;
 #else
-int watermark_boost_factor __read_mostly = 0;
+int watermark_boost_factor __read_mostly = 15000;
 #endif
 int watermark_scale_factor = 10;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 93f95c33821cc..cb850f5d5cef0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -196,7 +196,7 @@ int kswapd_threads_current = DEF_KSWAPD_THREADS_PER_NODE;
 /*
  * From 0 .. 200. Higher means more swappy.
  */
-int vm_swappiness = 100;
+int vm_swappiness = 60;
 /*
  * The total number of pages which are beyond the high watermark within all
  * zones.
@@ -4100,7 +4100,7 @@ static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned
 }
 
 /* to protect the working set of the last N jiffies */
-static unsigned long lru_gen_min_ttl __read_mostly = 1250; //5000ms @ CONFIG_HZ=250
+static unsigned long lru_gen_min_ttl __read_mostly = 5000;
 
 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7b86d679d4367..d963b51d1cc66 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1757,7 +1757,7 @@ static const struct seq_operations vmstat_op = {
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 /*xiaomi modify to 10s */
-int sysctl_stat_interval __read_mostly = 10*HZ;
+int sysctl_stat_interval __read_mostly = 30*HZ;
 
 #ifdef CONFIG_PROC_FS
 static void refresh_vm_stats(struct work_struct *work)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a29810408e374..8f15103770812 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2726,7 +2726,7 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
 	net->ipv4.sysctl_tcp_sack = 1;
 	net->ipv4.sysctl_tcp_window_scaling = 1;
-	net->ipv4.sysctl_tcp_timestamps = 1;
+	net->ipv4.sysctl_tcp_timestamps = 0;
 	net->ipv4.sysctl_tcp_early_retrans = 3;
 	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
 	net->ipv4.sysctl_tcp_slow_start_after_idle = 1;	/* By default, RFC2861 behavior. */
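
Note (not part of the patch): every hunk above only changes a compiled-in default, so the simplest sanity check on a device running a kernel built with this patch is to read back the corresponding runtime knobs. The Python sketch below is an editor's illustration, not the author's method; it assumes the usual procfs paths for these tunables (e.g. /proc/sys/vm/swappiness) and simply reports any knob a given build does not expose.

#!/usr/bin/env python3
# Sketch: read back a few of the tunables whose built-in defaults this patch
# changes. Paths assume a typical Linux/Android kernel; missing knobs are
# reported rather than treated as errors.
from pathlib import Path

TUNABLES = {
    "kernel.sched_autogroup_enabled": "/proc/sys/kernel/sched_autogroup_enabled",  # patch default: 0
    "kernel.sched_child_runs_first":  "/proc/sys/kernel/sched_child_runs_first",   # patch default: 1
    "vm.swappiness":                  "/proc/sys/vm/swappiness",                   # patch default: 60
    "vm.watermark_boost_factor":      "/proc/sys/vm/watermark_boost_factor",       # patch default: 15000
    "vm.stat_interval":               "/proc/sys/vm/stat_interval",                # patch default: 30*HZ (reads back in seconds)
    "net.ipv4.tcp_timestamps":        "/proc/sys/net/ipv4/tcp_timestamps",         # patch default: 0
}

def main() -> None:
    for name, path in TUNABLES.items():
        try:
            value = Path(path).read_text().strip()
        except OSError:
            value = "(not exposed on this kernel)"
        print(f"{name:32} {value}")

if __name__ == "__main__":
    main()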