initial import

git-svn-id: https://irqbalance.googlecode.com/svn/trunk@2 46b42954-3823-0410-bd82-eb80b452c9b5
arjanvandeven 2006-12-09 15:59:16 +00:00
parent f34ec171d9
commit 95f9881ff8
19 changed files with 3689 additions and 0 deletions

17
Makefile Normal file

@@ -0,0 +1,17 @@
CFLAGS+=-g -Os -D_FORTIFY_SOURCE=2 -Wall -W `pkg-config --cflags glib-2.0`

all: irqbalance

LIBS=bitmap.o irqbalance.o cputree.o procinterrupts.o irqlist.o placement.o activate.o network.o powermode.o numa.o classify.o

irqbalance: .depend $(LIBS)
	gcc -g -O2 -D_FORTIFY_SOURCE=2 -Wall `pkg-config --libs glib-2.0` $(LIBS) -o irqbalance

clean:
	rm -f irqbalance *~ *.o .depend

# rule for building dependency lists, and writing them to a file
# named ".depend".
.depend:
	rm -f .depend
	gccmakedep -f- -- $(CFLAGS) -- *.c > .depend

57
activate.c Normal file

@@ -0,0 +1,57 @@
/*
* Copyright (C) 2006, Intel Corporation
*
* This file is part of irqbalance
*
* This program file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program in a file named COPYING; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA
*/
/*
* This file contains the code to communicate a selected distribution / mapping
* of interrupts to the kernel.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
#include "irqbalance.h"
void activate_mapping(void)
{
	struct interrupt *irq;
	GList *iter;

	iter = g_list_first(interrupts);
	while (iter) {
		irq = iter->data;
		iter = g_list_next(iter);

		if (!cpus_equal(irq->mask, irq->old_mask)) {
			char buf[PATH_MAX];
			FILE *file;

			sprintf(buf, "/proc/irq/%i/smp_affinity", irq->number);
			file = fopen(buf, "w");
			if (!file)
				continue;
			cpumask_scnprintf(buf, PATH_MAX, irq->mask);
			fprintf(file, "%s", buf);
			fclose(file);

			irq->old_mask = irq->mask;
		}
	}
}
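
activate_mapping() above applies a new assignment by writing each changed IRQ's cpumask, formatted by cpumask_scnprintf(), into /proc/irq/<number>/smp_affinity. As a minimal sketch of the same /proc interface from the reading side (not part of this commit; the helper name and the IRQ number are made up for illustration):

#include <stdio.h>

/* Illustrative only: read back the comma-separated hex mask that the
 * kernel exposes and that activate_mapping() writes. */
static int read_affinity(int irq, char *buf, int len)
{
	char path[64];
	FILE *file;

	snprintf(path, sizeof(path), "/proc/irq/%i/smp_affinity", irq);
	file = fopen(path, "r");
	if (!file)
		return -1;
	if (!fgets(buf, len, file)) {
		fclose(file);
		return -1;
	}
	fclose(file);
	return 0;
}

int main(void)
{
	char mask[128];

	if (read_affinity(19, mask, sizeof(mask)) == 0)	/* IRQ 19 is just an example */
		printf("irq 19 affinity: %s", mask);
	return 0;
}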

366
bitmap.c Normal file

@@ -0,0 +1,366 @@
/*
This file is taken from the Linux kernel and minimally adapted for use in userspace
*/
/*
* lib/bitmap.c
* Helper functions for bitmap.h.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include "bitmap.h"
#include "non-atomic.h"
/*
* bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
*
* The possible unused bits in the last, partially used word
* of a bitmap are 'don't care'. The implementation makes
* no particular effort to keep them zero. It ensures that
* their value will not affect the results of any operation.
* The bitmap operations that return Boolean (bitmap_empty,
* for example) or scalar (bitmap_weight, for example) results
* carefully filter out these unused bits from impacting their
* results.
*
* These operations actually hold to a slightly stronger rule:
* if you don't input any bitmaps to these ops that have some
* unused bits set, then they won't output any set unused bits
* in output bitmaps.
*
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
* for the best explanations of this ordering.
*/
int __bitmap_empty(const unsigned long *bitmap, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap[k])
return 0;
if (bits % BITS_PER_LONG)
if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
int __bitmap_full(const unsigned long *bitmap, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (~bitmap[k])
return 0;
if (bits % BITS_PER_LONG)
if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return 0;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
if (bits % BITS_PER_LONG)
dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
}
/*
* __bitmap_shift_right - logical right shift of the bits in a bitmap
* @dst - destination bitmap
* @src - source bitmap
* @nbits - shift by this many bits
* @bits - bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
* LS bits shifted off the bottom are lost.
*/
void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
unsigned long mask = (1UL << left) - 1;
for (k = 0; off + k < lim; ++k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take lower rem bits of
* word above and make them the top rem bits of result.
*/
if (!rem || off + k + 1 >= lim)
upper = 0;
else {
upper = src[off + k + 1];
if (off + k + 1 == lim - 1 && left)
upper &= mask;
}
lower = src[off + k];
if (left && off + k == lim - 1)
lower &= mask;
dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
if (left && k == lim - 1)
dst[k] &= mask;
}
if (off)
memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
/*
* __bitmap_shift_left - logical left shift of the bits in a bitmap
* @dst - destination bitmap
* @src - source bitmap
* @nbits - shift by this many bits
* @bits - bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
* and those MS bits shifted off the top are lost.
*/
void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
for (k = lim - off - 1; k >= 0; --k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take upper rem bits of
* word below and make them the bottom rem bits of result.
*/
if (rem && k > 0)
lower = src[k - 1];
else
lower = 0;
upper = src[k];
if (left && k == lim - 1)
upper &= (1UL << left) - 1;
dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
if (left && k + off == lim - 1)
dst[k + off] &= (1UL << left) - 1;
}
if (off)
memset(dst, 0, off*sizeof(unsigned long));
}
void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] & bitmap2[k];
}
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
}
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
}
void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] & ~bitmap2[k];
}
int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return 1;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 1;
return 0;
}
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.
*/
#define CHUNKSZ 32
#define nbits_to_hold_value(val) fls(val)
#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
/**
* bitmap_scnprintf - convert bitmap to an ASCII hex string.
* @buf: byte buffer into which string is placed
* @buflen: reserved size of @buf, in bytes
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Exactly @nmaskbits bits are displayed. Hex digits are grouped into
* comma-separated sets of eight digits per set.
*/
int bitmap_scnprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
{
int i, word, bit, len = 0;
unsigned long val;
const char *sep = "";
int chunksz;
uint32_t chunkmask;
int first = 1;
chunksz = nmaskbits & (CHUNKSZ - 1);
if (chunksz == 0)
chunksz = CHUNKSZ;
i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
val = (maskp[word] >> bit) & chunkmask;
if (val!=0 || !first) {
len += snprintf(buf+len, buflen-len, "%s%0*lx", sep,
(chunksz+3)/4, val);
chunksz = CHUNKSZ;
sep = ",";
first = 0;
}
}
return len;
}
/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @is_user: location of buffer, 0 indicates kernel space
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Commas group hex digits into chunks. Each chunk defines exactly 32
* bits of the resultant bitmask. No chunk may specify a value larger
* than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
* then leading 0-bits are prepended. %-EINVAL is returned for illegal
* characters and for grouping errors such as "1,,5", ",44", "," and "".
* Leading and trailing whitespace accepted, but not embedded whitespace.
*/
int __bitmap_parse(const char *buf, unsigned int buflen,
int is_user __attribute((unused)), unsigned long *maskp,
int nmaskbits)
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
uint32_t chunk;
bitmap_zero(maskp, nmaskbits);
nchunks = nbits = totaldigits = c = 0;
do {
chunk = ndigits = 0;
/* Get the next chunk of the bitmap */
while (buflen) {
old_c = c;
c = *buf++;
buflen--;
if (isspace(c))
continue;
/*
* If the last character was a space and the current
* character isn't '\0', we've got embedded whitespace.
* This is a no-no, so throw an error.
*/
if (totaldigits && c && isspace(old_c))
return 0;
/* A '\0' or a ',' signal the end of the chunk */
if (c == '\0' || c == ',')
break;
if (!isxdigit(c))
return -EINVAL;
/*
* Make sure there are at least 4 free bits in 'chunk'.
* If not, this hexdigit will overflow 'chunk', so
* throw an error.
*/
if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
return -EOVERFLOW;
chunk = (chunk << 4) | unhex(c);
ndigits++; totaldigits++;
}
if (ndigits == 0)
return -EINVAL;
if (nchunks == 0 && chunk == 0)
continue;
__bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
*maskp |= chunk;
nchunks++;
nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
if (nbits > nmaskbits)
return -EOVERFLOW;
} while (buflen && c == ',');
return 0;
}
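
bitmap_scnprintf() and __bitmap_parse() are inverses over the comma-separated, 32-bit-chunk hex format documented above. A small round-trip sketch, assuming it is compiled together with bitmap.c and the set_bit() helper from non-atomic.h (the chosen bit positions are arbitrary):

#include <stdio.h>
#include <string.h>
#include "bitmap.h"

int main(void)
{
	DECLARE_BITMAP(mask, 128);
	DECLARE_BITMAP(copy, 128);
	char buf[64];

	bitmap_zero(mask, 128);
	set_bit(0, mask);	/* lowest bit -> last hex chunk */
	set_bit(33, mask);
	set_bit(127, mask);	/* highest bit -> first hex chunk */

	bitmap_scnprintf(buf, sizeof(buf), mask, 128);
	printf("%s\n", buf);	/* e.g. "80000000,00000000,00000002,00000001" */

	__bitmap_parse(buf, strlen(buf), 0, copy, 128);
	printf("equal: %d\n", bitmap_equal(mask, copy, 128));	/* 1 */
	return 0;
}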

356
bitmap.h Normal file

@@ -0,0 +1,356 @@
#ifndef __LINUX_BITMAP_H
#define __LINUX_BITMAP_H
#ifndef __ASSEMBLY__
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#define BITS_PER_LONG ((int)sizeof(unsigned long)*8)
#define BITS_TO_LONGS(bits) \
(((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
#define ALIGN(x,a) (((x)+(a)-1UL)&~((a)-1UL))
#include "non-atomic.h"
static inline unsigned int hweight32(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
}
static inline unsigned long hweight64(uint64_t w)
{
if (BITS_PER_LONG == 32)
return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
w -= (w >> 1) & 0x5555555555555555ull;
w = (w & 0x3333333333333333ull) + ((w >> 2) & 0x3333333333333333ull);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0full;
return (w * 0x0101010101010101ull) >> 56;
}
static inline int fls(int x)
{
int r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
static inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
#define min(x,y) ({ \
typeof(x) _x = (x); \
typeof(y) _y = (y); \
(void) (&_x == &_y); \
_x < _y ? _x : _y; })
/*
* bitmaps provide bit arrays that consume one or more unsigned
* longs. The bitmap interface and available operations are listed
* here, in bitmap.h
*
* Function implementations generic to all architectures are in
* lib/bitmap.c. Function implementations that are architecture
* specific are in various include/asm-<arch>/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.
*/
/*
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
* bitmap_zero(dst, nbits) *dst = 0UL
* bitmap_fill(dst, nbits) *dst = ~0UL
* bitmap_copy(dst, src, nbits) *dst = *src
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
* bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
* bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number of set bits
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
* bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
*/
/*
* Also the following operations in asm/bitops.h apply to bitmaps.
*
* set_bit(bit, addr) *addr |= bit
* clear_bit(bit, addr) *addr &= ~bit
* change_bit(bit, addr) *addr ^= bit
* test_bit(bit, addr) Is bit set in *addr?
* test_and_set_bit(bit, addr) Set bit and return old value
* test_and_clear_bit(bit, addr) Clear bit and return old value
* test_and_change_bit(bit, addr) Change bit and return old value
* find_first_zero_bit(addr, nbits) Position first zero bit in *addr
* find_first_bit(addr, nbits) Position first set bit in *addr
* find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
* find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
/*
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
*/
/*
* lib/bitmap.c provides these functions:
*/
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
extern int __bitmap_full(const unsigned long *bitmap, int bits);
extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
int bits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_scnlistprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
)
static inline void bitmap_zero(unsigned long *dst, int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = 0UL;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
static inline void bitmap_fill(unsigned long *dst, int nbits)
{
size_t nlongs = BITS_TO_LONGS(nbits);
if (nlongs > 1) {
int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = *src;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = *src1 & *src2;
else
__bitmap_and(dst, src1, src2, nbits);
}
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = *src1 | *src2;
else
__bitmap_or(dst, src1, src2, nbits);
}
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = *src1 ^ *src2;
else
__bitmap_xor(dst, src1, src2, nbits);
}
static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = *src1 & ~(*src2);
else
__bitmap_andnot(dst, src1, src2, nbits);
}
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_complement(dst, src, nbits);
}
static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (nbits <= BITS_PER_LONG)
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_equal(src1, src2, nbits);
}
static inline int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (nbits <= BITS_PER_LONG)
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
else
return __bitmap_intersects(src1, src2, nbits);
}
static inline int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (nbits <= BITS_PER_LONG)
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_subset(src1, src2, nbits);
}
static inline int bitmap_empty(const unsigned long *src, int nbits)
{
if (nbits <= BITS_PER_LONG)
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_empty(src, nbits);
}
static inline int bitmap_full(const unsigned long *src, int nbits)
{
if (nbits <= BITS_PER_LONG)
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_full(src, nbits);
}
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
if (nbits <= BITS_PER_LONG)
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = *src >> n;
else
__bitmap_shift_right(dst, src, n, nbits);
}
static inline void bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (nbits <= BITS_PER_LONG)
*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_shift_left(dst, src, n, nbits);
}
static inline int bitmap_parse(const char *buf, unsigned int buflen,
unsigned long *maskp, int nmaskbits)
{
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
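
The inline wrappers above take a single-word fast path when nbits fits in one unsigned long and otherwise call the __bitmap_* helpers from bitmap.c; in both cases the unused high bits of the last word are masked off with BITMAP_LAST_WORD_MASK(). A tiny sketch of that masking behaviour (the values are illustrative, not from the source):

#include <stdio.h>
#include "bitmap.h"

int main(void)
{
	DECLARE_BITMAP(small, 10);	/* fits in one unsigned long */

	small[0] = 0xfffUL;	/* bits 10 and 11 lie beyond nbits */
	printf("weight = %d\n", bitmap_weight(small, 10));	/* 10, not 12 */
	printf("full   = %d\n", bitmap_full(small, 10));	/* 1 */
	return 0;
}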

126
classify.c Normal file

@@ -0,0 +1,126 @@
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include "irqbalance.h"
#include "types.h"
char *classes[] = {
"other",
"legacy",
"storage",
"timer",
"ethernet",
"fasteth",
0
};
int map_class_to_level[7] =
{ BALANCE_PACKAGE, BALANCE_CACHE, BALANCE_CACHE, BALANCE_NONE, BALANCE_CORE, BALANCE_CORE };
int class_counts[7];
/*
NOTE: although this file has a hard-coded list of modules, a missing entry is not
a big deal; the types are also set based on PCI class information when available.
*/
/*
Based on the original irqbalance code which is:
Copyright (C) 2003 Red Hat, Inc. All rights reserved.
Usage and distribution of this file are subject to the Gnu General Public License Version 2
that can be found at http://www.gnu.org/licenses/gpl.txt and the COPYING file as
distributed together with this file is included herein by reference.
Author: Arjan van de Ven <arjanv@redhat.com>
*/
static char *legacy_modules[] = {
"PS/2",
"serial",
"i8042",
"acpi",
"floppy",
"parport",
"keyboard",
"usb-ohci",
"usb-uhci",
"uhci_hcd",
"ohci_hcd",
"ehci_hcd",
"EMU10K1",
0
};
static char *timer_modules[] = {
"rtc",
"timer",
0
};
static char *storage_modules[] = {
"aic7xxx",
"aic79xx",
"ide",
"cciss",
"cpqarray",
"qla2",
"megaraid",
"fusion",
"libata",
"ohci1394",
"sym53c8xx",
0
};
static char *ethernet_modules[] = {
"eth",
"e100",
"eepro100",
"orinico_cs",
"wvlan_cs",
"3c5",
"HiSax",
0
};
int find_class(struct interrupt *irq, char *moduletext)
{
int guess = IRQ_OTHER;
int i;
if (moduletext == NULL)
return guess;
for (i=0; legacy_modules[i]; i++)
if (strstr(moduletext, legacy_modules[i]))
guess = IRQ_LEGACY;
for (i=0; storage_modules[i]; i++)
if (strstr(moduletext, storage_modules[i]))
guess = IRQ_SCSI;
for (i=0; timer_modules[i]; i++)
if (strstr(moduletext, timer_modules[i]))
guess = IRQ_TIMER;
for (i=0; ethernet_modules[i]; i++)
if (strstr(moduletext, ethernet_modules[i]))
guess = IRQ_ETH;
if (guess == IRQ_OTHER && irq->number==0)
guess = IRQ_TIMER;
if (guess > irq->class)
return guess;
return irq->class;
}
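
find_class() returns the most specific class that any substring match produces, and never downgrades the class an interrupt already has. A hedged sketch of how a caller might drive it from the device column of a /proc/interrupts line (the sample device name and the setup code are ours, not part of this commit):

#include <string.h>
#include "irqbalance.h"
#include "types.h"

/* Illustrative only: classify one interrupt from its device string. */
static void classify_one(void)
{
	struct interrupt irq;
	char devtext[] = "eth0";	/* made-up example; matches "eth" above */
	int level;

	memset(&irq, 0, sizeof(irq));
	irq.number = 19;
	irq.class = IRQ_OTHER;

	irq.class = find_class(&irq, devtext);	/* -> IRQ_ETH */
	level = map_class_to_level[irq.class];	/* -> BALANCE_CORE */
	(void)level;
}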

30
constants.h Normal file

@@ -0,0 +1,30 @@
#ifndef __INCLUDE_GUARD_CONSTANTS_H
#define __INCLUDE_GUARD_CONSTANTS_H
/* interval between rebalance attempts in seconds */
#define SLEEP_INTERVAL 10
/* NUMA topology refresh intervals, in units of SLEEP_INTERVAL */
#define NUMA_REFRESH_INTERVAL 32
/* NIC interrupt refresh interval, in units of SLEEP_INTERVAL */
#define NIC_REFRESH_INTERVAL 32
/* minimum number of interrupts since boot for an interrupt to matter */
#define MIN_IRQ_COUNT 20
/* balancing tunings */
#define CROSS_PACKAGE_PENALTY 3000
#define NUMA_PENALTY 250
#define POWER_MODE_PACKAGE_THRESHOLD 10000
#define CLASS_VIOLATION_PENTALTY 6000
#define CORE_SPECIFIC_THRESHOLD 5000
/* power mode */
#define POWER_MODE_SOFTIRQ_THRESHOLD 20
#define POWER_MODE_HYSTERESIS 3
#endif
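
The refresh intervals above are counted in balancer iterations, not seconds: with SLEEP_INTERVAL at 10, a value of 32 means a rescan roughly every 320 seconds. A minimal sketch of the counter pattern such constants typically drive (the loop and the placeholder comments are ours, not the irqbalance main loop):

#include <unistd.h>
#include "constants.h"

void balance_loop_sketch(void)
{
	unsigned int counter = 0;

	for (;;) {
		if ((counter % NUMA_REFRESH_INTERVAL) == 0) {
			/* rebuild the NUMA topology here */
		}
		if ((counter % NIC_REFRESH_INTERVAL) == 0) {
			/* rescan network interrupts here */
		}
		/* ... examine /proc/interrupts and rebalance ... */
		sleep(SLEEP_INTERVAL);
		counter++;
	}
}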

400
cpumask.h Normal file

@@ -0,0 +1,400 @@
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H
#define NR_CPUS 256
/*
* Cpumasks provide a bitmap suitable for representing the
* set of CPU's in a system, one bit position per CPU number.
*
* See detailed comments in the file linux/bitmap.h describing the
* data type on which these cpumasks are based.
*
* For details of cpumask_scnprintf() and cpumask_parse_user(),
* see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
* For details of cpulist_scnprintf() and cpulist_parse(), see
* bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
* For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
* For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
*
* The available cpumask operations are:
*
* void cpu_set(cpu, mask) turn on bit 'cpu' in mask
* void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
* void cpus_setall(mask) set all bits
* void cpus_clear(mask) clear all bits
* int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
* int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
*
* void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
* void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
* void cpus_xor(dst, src1, src2) dst = src1 ^ src2
* void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
* void cpus_complement(dst, src) dst = ~src
*
* int cpus_equal(mask1, mask2) Does mask1 == mask2?
* int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
* int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
* int cpus_empty(mask) Is mask empty (no bits set)?
* int cpus_full(mask) Is mask full (all bits set)?
* int cpus_weight(mask) Hamming weight - number of set bits
*
* void cpus_shift_right(dst, src, n) Shift right
* void cpus_shift_left(dst, src, n) Shift left
*
* int first_cpu(mask) Number lowest set bit, or NR_CPUS
* int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
*
* cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
* CPU_MASK_ALL Initializer - all bits set
* CPU_MASK_NONE Initializer - no bits set
* unsigned long *cpus_addr(mask) Array of unsigned long's in mask
*
* int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
* int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
* int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
* int cpulist_parse(buf, map) Parse ascii string as cpulist
* int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
* int cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
*
* for_each_cpu_mask(cpu, mask) for-loop cpu over mask
*
* int num_online_cpus() Number of online CPUs
* int num_possible_cpus() Number of all possible CPUs
* int num_present_cpus() Number of present CPUs
*
* int cpu_online(cpu) Is some cpu online?
* int cpu_possible(cpu) Is some cpu possible?
* int cpu_present(cpu) Is some cpu present (can schedule)?
*
* int any_online_cpu(mask) First online cpu in mask
*
* for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
* for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
* for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
*
* Subtlety:
* 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
* to generate slightly worse code. Note for example the additional
* 40 lines of assembly code compiling the "for each possible cpu"
* loops buried in the disk_stat_read() macros calls when compiling
* drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
* one-line #define for cpu_isset(), instead of wrapping an inline
* inside a macro, the way we do the other calls.
*/
#include "bitmap.h"
typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
extern cpumask_t _unused_cpumask_arg_;
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpu, dstp->bits);
}
#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpu, dstp->bits);
}
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_andnot(dst, src1, src2) \
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
static inline void __cpus_complement(cpumask_t *dstp,
const cpumask_t *srcp, int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
}
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
#define cpus_shift_right(dst, src, n) \
__cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_right(cpumask_t *dstp,
const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}
#define cpus_shift_left(dst, src, n) \
__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
static inline int __first_cpu(const cpumask_t *srcp)
{
return ffs(*srcp->bits)-1;
}
#define first_cpu(src) __first_cpu(&(src))
int __next_cpu(int n, const cpumask_t *srcp);
#define next_cpu(n, src) __next_cpu((n), &(src))
#define cpumask_of_cpu(cpu) \
({ \
typeof(_unused_cpumask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
m.bits[0] = 1UL<<(cpu); \
} else { \
cpus_clear(m); \
cpu_set((cpu), m); \
} \
m; \
})
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
#if 0
#define CPU_MASK_ALL \
(cpumask_t) { { \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }
#else
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }
#endif
#define CPU_MASK_NONE \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
} }
#define CPU_MASK_CPU0 \
(cpumask_t) { { \
[0] = 1UL \
} }
#define cpus_addr(src) ((src).bits)
#define cpumask_scnprintf(buf, len, src) \
__cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpumask_scnprintf(char *buf, int len,
const cpumask_t *srcp, int nbits)
{
return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}
#define cpumask_parse_user(ubuf, ulen, dst) \
__cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
static inline int __cpumask_parse_user(const char *buf, int len,
cpumask_t *dstp, int nbits)
{
return bitmap_parse(buf, len, dstp->bits, nbits);
}
#define cpulist_scnprintf(buf, len, src) \
__cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpulist_scnprintf(char *buf, int len,
const cpumask_t *srcp, int nbits)
{
return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}
#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
#define cpu_remap(oldbit, old, new) \
__cpu_remap((oldbit), &(old), &(new), NR_CPUS)
static inline int __cpu_remap(int oldbit,
const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}
#define cpus_remap(dst, src, old, new) \
__cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}
#if NR_CPUS > 1
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = first_cpu(mask); \
(cpu) < NR_CPUS; \
(cpu) = next_cpu((cpu), (mask)))
#else /* NR_CPUS == 1 */
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#endif /* NR_CPUS */
/*
* The following particular system cpumasks and operations manage
* possible, present and online cpus. Each of them is a fixed size
* bitmap of size NR_CPUS.
*
* #ifdef CONFIG_HOTPLUG_CPU
* cpu_possible_map - has bit 'cpu' set iff cpu is populatable
* cpu_present_map - has bit 'cpu' set iff cpu is populated
* cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
* #else
* cpu_possible_map - has bit 'cpu' set iff cpu is populated
* cpu_present_map - copy of cpu_possible_map
* cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
* #endif
*
* In either case, NR_CPUS is fixed at compile time, as the static
* size of these bitmaps. The cpu_possible_map is fixed at boot
* time, as the set of CPU id's that it is possible might ever
* be plugged in at anytime during the life of that system boot.
* The cpu_present_map is dynamic(*), representing which CPUs
* are currently plugged in. And cpu_online_map is the dynamic
* subset of cpu_present_map, indicating those CPUs available
* for scheduling.
*
* If HOTPLUG is enabled, then cpu_possible_map is forced to have
* all NR_CPUS bits set, otherwise it is just the set of CPUs that
* ACPI reports present at boot.
*
* If HOTPLUG is enabled, then cpu_present_map varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_map is just a copy of cpu_possible_map.
*
* (*) Well, cpu_present_map is dynamic in the hotplug case. If not
* hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
*
* Subtleties:
* 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_maps are placebos. Changing them
* will have no useful effect on the following num_*_cpus()
* and cpu_*() macros in the UP case. This ugliness is a UP
* optimization - don't waste any instructions or memory references
* asking if you're online or how many CPUs there are if there is
* only one CPU.
* 2) Most SMP arch's #define some of these maps to be some
* other map specific to that arch. Therefore, the following
* must be #define macros, not inlines. To see why, examine
* the assembly code produced by the following. Note that
* set1() writes phys_x_map, but set2() writes x_map:
* int x_map, phys_x_map;
* #define set1(a) x_map = a
* inline void set2(int a) { x_map = a; }
* #define x_map phys_x_map
* main(){ set1(3); set2(5); }
*/
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;
#if NR_CPUS > 1
#define num_online_cpus() cpus_weight(cpu_online_map)
#define num_possible_cpus() cpus_weight(cpu_possible_map)
#define num_present_cpus() cpus_weight(cpu_present_map)
#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
#else
#define num_online_cpus() 1
#define num_possible_cpus() 1
#define num_present_cpus() 1
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
#endif
int highest_possible_processor_id(void);
#define any_online_cpu(mask) __any_online_cpu(&(mask))
int __any_online_cpu(const cpumask_t *mask);
#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
#endif /* __LINUX_CPUMASK_H */
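
cpumask_t is a fixed NR_CPUS-bit bitmap, and the macros above hide the address-of plumbing around the bitmap_* calls. A short usage sketch, assuming cpu_online_map and __next_cpu() are provided elsewhere in the tree (the CPU numbers are arbitrary):

#include <stdio.h>
#include "cpumask.h"

void cpumask_example(void)
{
	cpumask_t mine, both;
	int cpu;

	cpus_clear(mine);
	cpu_set(2, mine);
	cpu_set(5, mine);

	/* keep only the CPUs that are actually online */
	cpus_and(both, mine, cpu_online_map);

	for_each_cpu_mask(cpu, both)
		printf("may run on cpu %d\n", cpu);
}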

371
cputree.c Normal file

@@ -0,0 +1,371 @@
/*
* Copyright (C) 2006, Intel Corporation
*
* This file is part of irqbalance
*
* This program file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program in a file named COPYING; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA
*/
/*
* This file contains the code to construct and manipulate a hierarchy of processors,
* cache domains and processor cores.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <dirent.h>
#include <glib.h>
#include "irqbalance.h"
GList *cpus;
GList *cache_domains;
GList *packages;
int package_count;
int cache_domain_count;
int core_count;
/* Users want to be able to keep interrupts away from some cpus; store these in a cpumask_t */
cpumask_t banned_cpus;
/*
it's convenient to have the complement of banned_cpus available so that
the AND operator can be used to mask out unwanted cpus
*/
static cpumask_t unbanned_cpus;
static void fill_packages(void)
{
GList *entry;
entry = g_list_first(cache_domains);
while (entry) {
struct package *package;
struct cache_domain *cache = NULL;
GList *entry2;
cache = entry->data;
entry2 = entry;
entry = g_list_next(entry);
if (cache->marker)
continue;
package = malloc(sizeof(struct package));
if (!package)
break;
memset(package, 0, sizeof(struct package));
package->mask = cache->package_mask;
package->number = cache->number;
while (entry2) {
struct cache_domain *cache2;
cache2 = entry2->data;
if (cpus_equal(cache->package_mask, cache2->package_mask)) {
cache2->marker = 1;
package->cache_domains = g_list_append(package->cache_domains, cache2);
if (package->number > cache2->number)