Skip to content

Commit

Permalink
Implement YAKT v17 tweaks in kernel sources
Browse files Browse the repository at this point in the history
credits to:
@NotZeetaa
RedHat
tytydraco
KTweak
kdrag0n
Arter97
@darkhz
  • Loading branch information
psndna88 committed Jun 8, 2024
1 parent 050545a commit d63218c
Show file tree
Hide file tree
Showing 8 changed files with 12 additions and 12 deletions.
2 changes: 1 addition & 1 deletion kernel/sched/autogroup.c
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
#include <linux/nospec.h>
#include "sched.h"

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
unsigned int __read_mostly sysctl_sched_autogroup_enabled = 0;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;

Expand Down
2 changes: 1 addition & 1 deletion kernel/sched/cpufreq_schedutil.c
Original file line number Diff line number Diff line change
Expand Up @@ -1235,7 +1235,7 @@ static int sugov_init(struct cpufreq_policy *policy)

tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
tunables->down_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
tunables->up_rate_limit_us = 500;
tunables->up_rate_limit_us = 10000;
tunables->down_rate_limit_us = 20000;
tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
tunables->hispeed_freq = 0;
Expand Down
8 changes: 4 additions & 4 deletions kernel/sched/fair.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ static unsigned int sched_nr_latency = 8;
* After fork, child runs first. If set to 0 (default) then
* parent will (try to) run first.
*/
unsigned int sysctl_sched_child_runs_first __read_mostly = 0;
unsigned int sysctl_sched_child_runs_first __read_mostly = 1;

/*
* SCHED_OTHER wake-up granularity.
Expand All @@ -88,10 +88,10 @@ unsigned int sysctl_sched_child_runs_first __read_mostly = 0;
*
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_wakeup_granularity = 3000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 3000000UL;
unsigned int sysctl_sched_wakeup_granularity = 1500000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1500000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
const_debug unsigned int sysctl_sched_migration_cost = 50000UL;
DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);

/*
Expand Down
2 changes: 1 addition & 1 deletion kernel/sysctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -363,7 +363,7 @@ static struct ctl_table sysctl_base_table[] = {
{ }
};

static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int min_sched_granularity_ns = 1000000; /* 1000 usecs = 1 msec */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
Expand Down
2 changes: 1 addition & 1 deletion mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ int user_min_free_kbytes = -1;
*/
int watermark_boost_factor __read_mostly;
#else
int watermark_boost_factor __read_mostly = 0;
int watermark_boost_factor __read_mostly = 15000;
#endif
int watermark_scale_factor = 10;

Expand Down
4 changes: 2 additions & 2 deletions mm/vmscan.c
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,7 @@ int kswapd_threads_current = DEF_KSWAPD_THREADS_PER_NODE;
/*
* From 0 .. 200. Higher means more swappy.
*/
int vm_swappiness = 100;
int vm_swappiness = 60;
/*
* The total number of pages which are beyond the high watermark within all
* zones.
Expand Down Expand Up @@ -4100,7 +4100,7 @@ static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned
}

/* to protect the working set of the last N jiffies */
static unsigned long lru_gen_min_ttl __read_mostly = 1250; //5000ms @ CONFIG_HZ=250
static unsigned long lru_gen_min_ttl __read_mostly = 5000; /* jiffies: 20000 ms @ CONFIG_HZ=250 */

static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
Expand Down
2 changes: 1 addition & 1 deletion mm/vmstat.c
Original file line number Diff line number Diff line change
Expand Up @@ -1757,7 +1757,7 @@ static const struct seq_operations vmstat_op = {
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
/* raised to 30s (was 10s per xiaomi modification) */
int sysctl_stat_interval __read_mostly = 10*HZ;
int sysctl_stat_interval __read_mostly = 30*HZ;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
Expand Down
2 changes: 1 addition & 1 deletion net/ipv4/tcp_ipv4.c
Original file line number Diff line number Diff line change
Expand Up @@ -2726,7 +2726,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
net->ipv4.sysctl_tcp_sack = 1;
net->ipv4.sysctl_tcp_window_scaling = 1;
net->ipv4.sysctl_tcp_timestamps = 1;
net->ipv4.sysctl_tcp_timestamps = 0;
net->ipv4.sysctl_tcp_early_retrans = 3;
net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
Expand Down

0 comments on commit d63218c

Please sign in to comment.