Consolidate for_each_* functions

Each topology object type used to have its own for_each iterator. Now that we have a common topology object, we can merge all of that code into a single for_each_object() helper.
Neil Horman 2011-10-10 12:00:23 -04:00
parent f06001f62f
commit 58885160ee
7 changed files with 50 additions and 81 deletions
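
For readers skimming the diff below: the commit replaces the per-type iterators (for_each_package, for_each_cache_domain, for_each_cpu_core, for_each_numa_node) with one generic for_each_object() that walks any GList of topo_obj pointers. The following is a minimal standalone sketch of the pattern, not the project code: struct topo_obj is trimmed to what the demo needs, and the print_obj callback and demo list are hypothetical, but the iterator body mirrors the one added to the header in this commit.

#include <glib.h>
#include <stdio.h>

/* Trimmed stand-in for the real topo_obj (assumption for this demo only). */
struct topo_obj {
	int number;
	GList *children;
};

/* Generic iterator as introduced by this commit: walk any GList of
 * topo_obj pointers and hand each one to the callback. */
static inline void for_each_object(GList *list, void (*cb)(struct topo_obj *obj, void *data), void *data)
{
	GList *entry, *next;

	entry = g_list_first(list);
	while (entry) {
		next = g_list_next(entry);  /* saved first so cb may unlink entry */
		cb(entry->data, data);
		entry = next;
	}
}

/* Hypothetical callback, standing in for dump_package(), clear_node_stats(), etc. */
static void print_obj(struct topo_obj *obj, void *data)
{
	printf("%s %d\n", (char *)data, obj->number);
}

int main(void)
{
	struct topo_obj p0 = { .number = 0 }, p1 = { .number = 1 };
	GList *packages = NULL;  /* stands in for the master 'packages' list */

	packages = g_list_append(packages, &p0);
	packages = g_list_append(packages, &p1);

	/* Old style: for_each_package(NULL, print_obj, "package");
	 * New style: the caller names the list explicitly. */
	for_each_object(packages, print_obj, "package");

	g_list_free(packages);
	return 0;
}

Build the sketch with: gcc demo.c $(pkg-config --cflags --libs glib-2.0)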


@@ -279,7 +279,7 @@ static void dump_cache_domain(struct topo_obj *d, void *data)
 	cpumask_scnprintf(buffer, 4095, d->mask);
 	printf(" Cache domain %i: numa_node is %d cpu mask is %s (load %lu) \n", d->number, cache_domain_numa_node(d)->number, buffer, (unsigned long)d->load);
 	if (d->children)
-		for_each_cpu_core(d->children, dump_topo_obj, NULL);
+		for_each_object(d->children, dump_topo_obj, NULL);
 	if (d->interrupts)
 		for_each_irq(d->interrupts, dump_irq, (void *)10);
 }
@@ -290,7 +290,7 @@ static void dump_package(struct topo_obj *d, void *data)
 	cpumask_scnprintf(buffer, 4096, d->mask);
 	printf("Package %i: numa_node is %d cpu mask is %s (load %lu)\n", d->number, package_numa_node(d)->number, buffer, (unsigned long)d->load);
 	if (d->children)
-		for_each_cache_domain(d->children, dump_cache_domain, buffer);
+		for_each_object(d->children, dump_cache_domain, buffer);
 	if (d->interrupts)
 		for_each_irq(d->interrupts, dump_irq, (void *)2);
 }
@@ -298,7 +298,7 @@ static void dump_package(struct topo_obj *d, void *data)
 void dump_tree(void)
 {
 	char buffer[4096];
-	for_each_package(NULL, dump_package, buffer);
+	for_each_object(packages, dump_package, buffer);
 }
 static void clear_cpu_stats(struct topo_obj *d, void *data __attribute__((unused)))
@@ -310,19 +310,19 @@ static void clear_cpu_stats(struct topo_obj *d, void *data __attribute__((unused)))
 static void clear_cd_stats(struct topo_obj *d, void *data __attribute__((unused)))
 {
 	d->load = 0;
-	for_each_cpu_core(d->children, clear_cpu_stats, NULL);
+	for_each_object(d->children, clear_cpu_stats, NULL);
 }
 static void clear_package_stats(struct topo_obj *d, void *data __attribute__((unused)))
 {
 	d->load = 0;
-	for_each_cache_domain(d->children, clear_cd_stats, NULL);
+	for_each_object(d->children, clear_cd_stats, NULL);
 }
 static void clear_node_stats(struct topo_obj *d, void *data __attribute__((unused)))
 {
 	d->load = 0;
-	for_each_package(d->children, clear_package_stats, NULL);
+	for_each_object(d->children, clear_package_stats, NULL);
 }
 static void clear_irq_stats(struct irq_info *info, void *data __attribute__((unused)))
@@ -337,7 +337,7 @@ static void clear_irq_stats(struct irq_info *info, void *data __attribute__((unused)))
  */
 void clear_work_stats(void)
 {
-	for_each_numa_node(NULL, clear_node_stats, NULL);
+	for_each_object(numa_nodes, clear_node_stats, NULL);
 	for_each_irq(NULL, clear_irq_stats, NULL);
 }
@@ -417,43 +417,6 @@ void clear_cpu_tree(void)
 }
-void for_each_package(GList *list, void (*cb)(struct topo_obj *p, void *data), void *data)
-{
-	GList *entry = g_list_first(list ? list : packages);
-	GList *next;
-	while (entry) {
-		next = g_list_next(entry);
-		cb(entry->data, data);
-		entry = next;
-	}
-}
-void for_each_cache_domain(GList *list, void (*cb)(struct topo_obj *c, void *data), void *data)
-{
-	GList *entry = g_list_first(list ? list : cache_domains);
-	GList *next;
-	while (entry) {
-		next = g_list_next(entry);
-		cb(entry->data, data);
-		entry = next;
-	}
-}
-void for_each_cpu_core(GList *list, void (*cb)(struct topo_obj *c, void *data), void *data)
-{
-	GList *entry = g_list_first(list ? list : cpus);
-	GList *next;
-	while (entry) {
-		next = g_list_next(entry);
-		cb(entry->data, data);
-		entry = next;
-	}
-}
 static gint compare_cpus(gconstpointer a, gconstpointer b)
 {
 	const struct topo_obj *ai = a;


@@ -135,7 +135,7 @@ static void free_object_tree()
 static void dump_object_tree()
 {
-	for_each_numa_node(NULL, dump_numa_node_info, NULL);
+	for_each_object(numa_nodes, dump_numa_node_info, NULL);
 }
 static void force_rebalance_irq(struct irq_info *info, void *data __attribute__((unused)))


@@ -40,6 +40,15 @@ void clear_cpu_tree(void);
 void pci_numa_scan(void);
 /*===================NEW BALANCER FUNCTIONS============================*/
+/*
+ * Master topo_obj type lists
+ */
+extern GList *numa_nodes;
+extern GList *packages;
+extern GList *cache_domains;
+extern GList *cpus;
 enum hp_e {
 	HINT_POLICY_IGNORE,
 	HINT_POLICY_SUBSET,
@@ -58,7 +67,6 @@ extern enum hp_e hint_policy;
 extern void build_numa_node_list(void);
 extern void free_numa_node_list(void);
 extern void dump_numa_node_info(struct topo_obj *node, void *data);
-extern void for_each_numa_node(GList *list, void (*cb)(struct topo_obj *node, void *data), void *data);
 extern void add_package_to_node(struct topo_obj *p, int nodeid);
 extern struct topo_obj *get_numa_node(int nodeid);
@@ -66,14 +74,12 @@ extern struct topo_obj *get_numa_node(int nodeid);
  * Package functions
  */
 #define package_numa_node(p) ((p)->parent)
-extern void for_each_package(GList *list, void (*cb)(struct topo_obj *p, void *data), void *data);
 /*
  * cache_domain functions
  */
 #define cache_domain_package(c) ((c)->parent)
 #define cache_domain_numa_node(c) (package_numa_node(cache_domain_package((c))))
-extern void for_each_cache_domain(GList *list, void (*cb)(struct topo_obj *c, void *data), void *data);
 /*
  * cpu core functions
@@ -81,7 +87,6 @@ extern void for_each_cache_domain(GList *list, void (*cb)(struct topo_obj *c, void *data), void *data);
 #define cpu_cache_domain(cpu) ((cpu)->parent)
 #define cpu_package(cpu) (cache_domain_package(cpu_cache_domain((cpu))))
 #define cpu_numa_node(cpu) (package_numa_node(cache_domain_package(cpu_cache_domain((cpu)))))
-extern void for_each_cpu_core(GList *list, void (*cb)(struct topo_obj *c, void *data), void *data);
 extern struct topo_obj *find_cpu_core(int cpunr);
 extern int get_cpu_count(void);
@@ -94,8 +99,22 @@ extern void for_each_irq(GList *list, void (*cb)(struct irq_info *info, void *data), void *data);
 extern struct irq_info *get_irq_info(int irq);
 extern void migrate_irq(GList **from, GList **to, struct irq_info *info);
 extern struct irq_info *add_misc_irq(int irq);
 #define irq_numa_node(irq) ((irq)->numa_node)
+/*
+ * Generic object functions
+ */
+static inline void for_each_object(GList *list, void (*cb)(struct topo_obj *obj, void *data), void *data)
+{
+	GList *entry, *next;
+	entry = g_list_first(list);
+	while (entry) {
+		next = g_list_next(entry);
+		cb(entry->data, data);
+		entry = next;
+	}
+}
 #endif


@@ -131,23 +131,23 @@ static void migrate_overloaded_irqs(struct topo_obj *obj, void *data)
 #define find_overloaded_objs(name, info) do {\
 	int ___load_sources;\
 	memset(&(info), 0, sizeof(struct load_balance_info));\
-	for_each_##name(NULL, gather_load_stats, &(info));\
+	for_each_object((name), gather_load_stats, &(info));\
 	(info).avg_load = (info).total_load / (info).load_sources;\
-	for_each_##name(NULL, compute_deviations, &(info));\
+	for_each_object((name), compute_deviations, &(info));\
 	___load_sources = ((info).load_sources == 1) ? 1 : ((info).load_sources - 1);\
 	(info).std_deviation = (long double)((info).deviations / ___load_sources);\
 	(info).std_deviation = sqrt((info).std_deviation);\
-	for_each_##name(NULL, migrate_overloaded_irqs, &(info));\
+	for_each_object((name), migrate_overloaded_irqs, &(info));\
 }while(0)
 void update_migration_status(void)
 {
 	struct load_balance_info info;
-	find_overloaded_objs(cpu_core, info);
-	find_overloaded_objs(cache_domain, info);
-	find_overloaded_objs(package, info);
-	find_overloaded_objs(numa_node, info);
+	find_overloaded_objs(cpus, info);
+	find_overloaded_objs(cache_domains, info);
+	find_overloaded_objs(packages, info);
+	find_overloaded_objs(numa_nodes, info);
 }
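
The find_overloaded_objs() change above is the subtle part of the diff: the macro no longer token-pastes its first argument into a per-type iterator name and passes NULL (meaning "use the master list"); callers now pass the master list itself (cpus, cache_domains, packages, numa_nodes) and the macro forwards it to the generic iterator. Below is a compilable sketch of the new shape only, with trimmed, hypothetical stand-ins for load_balance_info and the stats callback; the real macro also computes averages and deviations as shown in the hunk above.

#include <glib.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical minimal stand-ins; the real types live in the balancer code. */
struct topo_obj { unsigned long load; };
struct load_balance_info { unsigned long total_load; int load_sources; };

static GList *cpus;  /* stands in for the master cpu list */

static void gather_load_stats(struct topo_obj *obj, void *data)
{
	struct load_balance_info *info = data;

	info->total_load += obj->load;
	info->load_sources++;
}

/* Condensed version of the generic iterator sketched earlier, repeated so
 * this example compiles on its own. */
static void for_each_object(GList *list, void (*cb)(struct topo_obj *obj, void *data), void *data)
{
	for (GList *entry = g_list_first(list); entry; entry = g_list_next(entry))
		cb(entry->data, data);
}

/* New-style macro shape: 'name' is now a GList * forwarded to for_each_object(),
 * where the old macro expanded to for_each_##name(NULL, ...). */
#define find_overloaded_objs(name, info) do {			\
	memset(&(info), 0, sizeof(struct load_balance_info));	\
	for_each_object((name), gather_load_stats, &(info));	\
} while (0)

int main(void)
{
	struct topo_obj c0 = { .load = 10 }, c1 = { .load = 30 };
	struct load_balance_info info;

	cpus = g_list_append(cpus, &c0);
	cpus = g_list_append(cpus, &c1);

	find_overloaded_objs(cpus, info);
	printf("sources=%d total=%lu\n", info.load_sources, info.total_load);

	g_list_free(cpus);
	return 0;
}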

numa.c

@@ -144,19 +144,6 @@ void dump_numa_node_info(struct topo_obj *d, void *unused __attribute__((unused)))
 	printf("\n");
 }
-void for_each_numa_node(GList *list, void(*cb)(struct topo_obj *node, void *data), void *data)
-{
-	GList *entry, *next;
-	entry = g_list_first(list ? list : numa_nodes);
-	while (entry) {
-		next = g_list_next(entry);
-		cb(entry->data, data);
-		entry = next;
-	}
-}
 struct topo_obj *get_numa_node(int nodeid)
 {
 	struct topo_obj find;


@@ -89,7 +89,7 @@ static void place_irq_in_cache_domain(struct irq_info *info, void *data)
 	place.least_irqs = NULL;
 	place.best_cost = INT_MAX;
-	for_each_cache_domain(p->children, find_best_object, &place);
+	for_each_object(p->children, find_best_object, &place);
 	asign = place.least_irqs ? place.least_irqs : place.best;
@@ -124,7 +124,7 @@ static void place_core(struct irq_info *info, void *data)
 	place.least_irqs = NULL;
 	place.best_cost = INT_MAX;
-	for_each_cpu_core(c->children, find_best_object, &place);
+	for_each_object(c->children, find_best_object, &place);
 	asign = place.least_irqs ? place.least_irqs : place.best;
@@ -159,7 +159,7 @@ static void place_irq_in_package(struct irq_info *info, void *data)
 	place.least_irqs = NULL;
 	place.best_cost = INT_MAX;
-	for_each_package(n->children, find_best_object, &place);
+	for_each_object(n->children, find_best_object, &place);
 	asign = place.least_irqs ? place.least_irqs : place.best;
@@ -200,7 +200,7 @@ static void place_irq_in_node(struct irq_info *info, void *data __attribute__((unused)))
 	place.least_irqs = NULL;
 	place.info = info;
-	for_each_numa_node(NULL, find_best_object, &place);
+	for_each_object(numa_nodes, find_best_object, &place);
 	asign = place.least_irqs ? place.least_irqs : place.best;
@@ -226,9 +226,9 @@ static void validate_object(struct topo_obj *d, void *data __attribute__((unused)))
 static void validate_object_tree_placement()
 {
-	for_each_package(NULL, validate_object, NULL);
-	for_each_cache_domain(NULL, validate_object, NULL);
-	for_each_cpu_core(NULL, validate_object, NULL);
+	for_each_object(packages, validate_object, NULL);
+	for_each_object(cache_domains, validate_object, NULL);
+	for_each_object(cpus, validate_object, NULL);
 }
 void calculate_placement(void)
@@ -239,9 +239,9 @@ void calculate_placement(void)
 	sort_irq_list(&rebalance_irq_list);
 	for_each_irq(rebalance_irq_list, place_irq_in_node, NULL);
-	for_each_numa_node(NULL, place_packages, NULL);
-	for_each_package(NULL, place_cache_domain, NULL);
-	for_each_cache_domain(NULL, place_cores, NULL);
+	for_each_object(numa_nodes, place_packages, NULL);
+	for_each_object(packages, place_cache_domain, NULL);
+	for_each_object(cache_domains, place_cores, NULL);
 	if (debug_mode)
 		validate_object_tree_placement();


@@ -204,6 +204,6 @@ void parse_proc_stat()
 	 * Now that we have load for each cpu attribute a fair share of the load
 	 * to each irq on that cpu
 	 */
-	for_each_cpu_core(NULL, compute_irq_load_share, NULL);
+	for_each_object(cpus, compute_irq_load_share, NULL);
 }