staging: ramster: delete the driver
Turns out it's not quite ready to be included, thanks to some other work done in the zcache and zram code, which breaks this driver. So, delete it for now, per the recommendation of Dan.

Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Parent: a49aeb1de5
Commit: c123daaad8
@@ -136,6 +136,4 @@ source "drivers/staging/android/Kconfig"
 
 source "drivers/staging/telephony/Kconfig"
 
-source "drivers/staging/ramster/Kconfig"
-
 endif # STAGING
@@ -57,5 +57,4 @@ obj-$(CONFIG_INTEL_MEI)	+= mei/
 obj-$(CONFIG_MFD_NVEC)		+= nvec/
 obj-$(CONFIG_DRM_OMAP)		+= omapdrm/
 obj-$(CONFIG_ANDROID)		+= android/
-obj-$(CONFIG_RAMSTER)		+= ramster/
 obj-$(CONFIG_PHONE)		+= telephony/
@@ -1,14 +0,0 @@
config RAMSTER
	tristate "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
	depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS && !OCFS2_FS && !ZCACHE && !HIGHMEM
	select XVMALLOC
	select LZO_COMPRESS
	select LZO_DECOMPRESS
	default n
	help
	  RAMster allows RAM on other machines in a cluster to be utilized
	  dynamically and symmetrically instead of swapping to a local swap
	  disk, thus improving performance on memory-constrained workloads
	  while minimizing total RAM across the cluster.  RAMster, like
	  zcache, compresses swap pages into local RAM, but then remotifies
	  the compressed pages to another node in the RAMster cluster.
@@ -1,2 +0,0 @@
obj-$(CONFIG_RAMSTER)	+= zcache-main.o tmem.o
obj-$(CONFIG_RAMSTER)	+= ramster_o2net.o cluster/
@@ -1,9 +0,0 @@
For this staging driver, RAMster duplicates code from fs/ocfs2/cluster
and from drivers/staging/zcache, then incorporates changes to the local
copy of the code.  Before RAMster can be promoted from staging, this code
duplication must be resolved.  Specifically, we will first need to work with
the ocfs2 maintainers to split out the ocfs2 core cluster code so that
it can be easily included by another subsystem, even if ocfs2 is not
configured, and also to merge the handful of functional changes required.
Second, the zcache and RAMster drivers should be either merged or reorganized
to separate out common code.
@@ -1,5 +0,0 @@
#obj-$(CONFIG_OCFS2_FS) += ocfs2_nodemanager.o
obj-$(CONFIG_RAMSTER) += ocfs2_nodemanager.o

ocfs2_nodemanager-objs := heartbeat.o masklog.o sys.o nodemanager.o \
	quorum.o tcp.o netdebug.o ver.o
The diff for one file is not shown because of its large size.
@@ -1,92 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * heartbeat.h
 *
 * Function prototypes
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef O2CLUSTER_HEARTBEAT_H
#define O2CLUSTER_HEARTBEAT_H

#include "ocfs2_heartbeat.h"

#define O2HB_REGION_TIMEOUT_MS		2000

#define O2HB_MAX_REGION_NAME_LEN	32

/* number of changes to be seen as live */
#define O2HB_LIVE_THRESHOLD	2
/* number of equal samples to be seen as dead */
extern unsigned int o2hb_dead_threshold;
#define O2HB_DEFAULT_DEAD_THRESHOLD	31
/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
#define O2HB_MIN_DEAD_THRESHOLD	2
#define O2HB_MAX_WRITE_TIMEOUT_MS	(O2HB_REGION_TIMEOUT_MS * (o2hb_dead_threshold - 1))

#define O2HB_CB_MAGIC		0x51d1e4ec

/* callback stuff */
enum o2hb_callback_type {
	O2HB_NODE_DOWN_CB = 0,
	O2HB_NODE_UP_CB,
	O2HB_NUM_CB
};

struct o2nm_node;
typedef void (o2hb_cb_func)(struct o2nm_node *, int, void *);

struct o2hb_callback_func {
	u32			hc_magic;
	struct list_head	hc_item;
	o2hb_cb_func		*hc_func;
	void			*hc_data;
	int			hc_priority;
	enum o2hb_callback_type hc_type;
};

struct config_group *o2hb_alloc_hb_set(void);
void o2hb_free_hb_set(struct config_group *group);

void o2hb_setup_callback(struct o2hb_callback_func *hc,
			 enum o2hb_callback_type type,
			 o2hb_cb_func *func,
			 void *data,
			 int priority);
int o2hb_register_callback(const char *region_uuid,
			   struct o2hb_callback_func *hc);
void o2hb_unregister_callback(const char *region_uuid,
			      struct o2hb_callback_func *hc);
void o2hb_fill_node_map(unsigned long *map,
			unsigned bytes);
void o2hb_exit(void);
int o2hb_init(void);
int o2hb_check_node_heartbeating(u8 node_num);
int o2hb_check_node_heartbeating_from_callback(u8 node_num);
int o2hb_check_local_node_heartbeating(void);
void o2hb_stop_all_regions(void);
int o2hb_get_all_regions(char *region_uuids, u8 numregions);
int o2hb_global_heartbeat_active(void);
#ifdef CONFIG_RAMSTER
void o2hb_manual_set_node_heartbeating(int);
#endif

#endif /* O2CLUSTER_HEARTBEAT_H */
@@ -1,155 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <asm/uaccess.h>

#include "masklog.h"

struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
EXPORT_SYMBOL_GPL(mlog_and_bits);
struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0);
EXPORT_SYMBOL_GPL(mlog_not_bits);

static ssize_t mlog_mask_show(u64 mask, char *buf)
{
	char *state;

	if (__mlog_test_u64(mask, mlog_and_bits))
		state = "allow";
	else if (__mlog_test_u64(mask, mlog_not_bits))
		state = "deny";
	else
		state = "off";

	return snprintf(buf, PAGE_SIZE, "%s\n", state);
}

static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
{
	if (!strnicmp(buf, "allow", 5)) {
		__mlog_set_u64(mask, mlog_and_bits);
		__mlog_clear_u64(mask, mlog_not_bits);
	} else if (!strnicmp(buf, "deny", 4)) {
		__mlog_set_u64(mask, mlog_not_bits);
		__mlog_clear_u64(mask, mlog_and_bits);
	} else if (!strnicmp(buf, "off", 3)) {
		__mlog_clear_u64(mask, mlog_not_bits);
		__mlog_clear_u64(mask, mlog_and_bits);
	} else
		return -EINVAL;

	return count;
}

struct mlog_attribute {
	struct attribute attr;
	u64 mask;
};

#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)

#define define_mask(_name) {			\
	.attr = {				\
		.name = #_name,			\
		.mode = S_IRUGO | S_IWUSR,	\
	},					\
	.mask = ML_##_name,			\
}

static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
	define_mask(TCP),
	define_mask(MSG),
	define_mask(SOCKET),
	define_mask(HEARTBEAT),
	define_mask(HB_BIO),
	define_mask(DLMFS),
	define_mask(DLM),
	define_mask(DLM_DOMAIN),
	define_mask(DLM_THREAD),
	define_mask(DLM_MASTER),
	define_mask(DLM_RECOVERY),
	define_mask(DLM_GLUE),
	define_mask(VOTE),
	define_mask(CONN),
	define_mask(QUORUM),
	define_mask(BASTS),
	define_mask(CLUSTER),
	define_mask(ERROR),
	define_mask(NOTICE),
	define_mask(KTHREAD),
};

static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };

static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
			 char *buf)
{
	struct mlog_attribute *mlog_attr = to_mlog_attr(attr);

	return mlog_mask_show(mlog_attr->mask, buf);
}

static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
			  const char *buf, size_t count)
{
	struct mlog_attribute *mlog_attr = to_mlog_attr(attr);

	return mlog_mask_store(mlog_attr->mask, buf, count);
}

static const struct sysfs_ops mlog_attr_ops = {
	.show  = mlog_show,
	.store = mlog_store,
};

static struct kobj_type mlog_ktype = {
	.default_attrs = mlog_attr_ptrs,
	.sysfs_ops     = &mlog_attr_ops,
};

static struct kset mlog_kset = {
	.kobj  = {.ktype = &mlog_ktype},
};

int mlog_sys_init(struct kset *o2cb_kset)
{
	int i = 0;

	while (mlog_attrs[i].attr.mode) {
		mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
		i++;
	}
	mlog_attr_ptrs[i] = NULL;

	kobject_set_name(&mlog_kset.kobj, "logmask");
	mlog_kset.kobj.kset = o2cb_kset;
	return kset_register(&mlog_kset);
}

void mlog_sys_shutdown(void)
{
	kset_unregister(&mlog_kset);
}
@@ -1,219 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef O2CLUSTER_MASKLOG_H
#define O2CLUSTER_MASKLOG_H

/*
 * For now this is a trivial wrapper around printk() that gives the critical
 * ability to enable sets of debugging output at run-time.  In the future this
 * will almost certainly be redirected to relayfs so that it can pay a
 * substantially lower heisenberg tax.
 *
 * Callers associate the message with a bitmask and a global bitmask is
 * maintained with help from /proc.  If any of the bits match the message is
 * output.
 *
 * We must have efficient bit tests on i386 and it seems gcc still emits crazy
 * code for the 64bit compare.  It emits very good code for the dual unsigned
 * long tests, though, completely avoiding tests that can never pass if the
 * caller gives a constant bitmask that fills one of the longs with all 0s.  So
 * the desire is to have almost all of the calls decided on by comparing just
 * one of the longs.  This leads to having infrequently given bits that are
 * frequently matched in the high bits.
 *
 * _ERROR and _NOTICE are used for messages that always go to the console and
 * have appropriate KERN_ prefixes.  We wrap these in our function instead of
 * just calling printk() so that this can eventually make its way through
 * relayfs along with the debugging messages.  Everything else gets KERN_DEBUG.
 * The inline tests and macro dance give GCC the opportunity to quite cleverly
 * only emit the appropriate printk() when the caller passes in a constant
 * mask, as is almost always the case.
 *
 * All this bitmask nonsense is managed from the files under
 * /sys/fs/o2cb/logmask/.  Reading the files gives a straightforward
 * indication of which bits are allowed (allow) or denied (off/deny).
 *	ENTRY deny
 *	EXIT deny
 *	TCP off
 *	MSG off
 *	SOCKET off
 *	ERROR allow
 *	NOTICE allow
 *
 * Writing changes the state of a given bit and requires a strictly formatted
 * single write() call:
 *
 *	write(fd, "allow", 5);
 *
 * Echoing allow/deny/off string into the logmask files can flip the bits
 * on or off as expected; here is the bash script for example:
 *
 * log_mask="/sys/fs/o2cb/log_mask"
 * for node in ENTRY EXIT TCP MSG SOCKET ERROR NOTICE; do
 *	echo allow >"$log_mask"/"$node"
 * done
 *
 * The debugfs.ocfs2 tool can also flip the bits with the -l option:
 *
 * debugfs.ocfs2 -l TCP allow
 */

/* for task_struct */
#include <linux/sched.h>

/* bits that are frequently given and infrequently matched in the low word */
/* NOTE: If you add a flag, you need to also update masklog.c! */
#define ML_TCP		0x0000000000000001ULL /* net cluster/tcp.c */
#define ML_MSG		0x0000000000000002ULL /* net network messages */
#define ML_SOCKET	0x0000000000000004ULL /* net socket lifetime */
#define ML_HEARTBEAT	0x0000000000000008ULL /* hb all heartbeat tracking */
#define ML_HB_BIO	0x0000000000000010ULL /* hb io tracing */
#define ML_DLMFS	0x0000000000000020ULL /* dlm user dlmfs */
#define ML_DLM		0x0000000000000040ULL /* dlm general debugging */
#define ML_DLM_DOMAIN	0x0000000000000080ULL /* dlm domain debugging */
#define ML_DLM_THREAD	0x0000000000000100ULL /* dlm domain thread */
#define ML_DLM_MASTER	0x0000000000000200ULL /* dlm master functions */
#define ML_DLM_RECOVERY	0x0000000000000400ULL /* dlm master functions */
#define ML_DLM_GLUE	0x0000000000000800ULL /* ocfs2 dlm glue layer */
#define ML_VOTE		0x0000000000001000ULL /* ocfs2 node messaging */
#define ML_CONN		0x0000000000002000ULL /* net connection management */
#define ML_QUORUM	0x0000000000004000ULL /* net connection quorum */
#define ML_BASTS	0x0000000000008000ULL /* dlmglue asts and basts */
#define ML_CLUSTER	0x0000000000010000ULL /* cluster stack */

/* bits that are infrequently given and frequently matched in the high word */
#define ML_ERROR	0x1000000000000000ULL /* sent to KERN_ERR */
#define ML_NOTICE	0x2000000000000000ULL /* sent to KERN_NOTICE */
#define ML_KTHREAD	0x4000000000000000ULL /* kernel thread activity */

#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
#ifndef MLOG_MASK_PREFIX
#define MLOG_MASK_PREFIX 0
#endif

/*
 * When logging is disabled, force the bit test to 0 for anything other
 * than errors and notices, allowing gcc to remove the code completely.
 * When enabled, allow all masks.
 */
#if defined(CONFIG_OCFS2_DEBUG_MASKLOG)
#define ML_ALLOWED_BITS ~0
#else
#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
#endif

#define MLOG_MAX_BITS 64

struct mlog_bits {
	unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
};

extern struct mlog_bits mlog_and_bits, mlog_not_bits;

#if BITS_PER_LONG == 32

#define __mlog_test_u64(mask, bits)			\
	( (u32)(mask & 0xffffffff) & bits.words[0] ||	\
	  ((u64)(mask) >> 32) & bits.words[1] )
#define __mlog_set_u64(mask, bits) do {			\
	bits.words[0] |= (u32)(mask & 0xffffffff);	\
	bits.words[1] |= (u64)(mask) >> 32;		\
} while (0)
#define __mlog_clear_u64(mask, bits) do {		\
	bits.words[0] &= ~((u32)(mask & 0xffffffff));	\
	bits.words[1] &= ~((u64)(mask) >> 32);		\
} while (0)
#define MLOG_BITS_RHS(mask) {				\
	{						\
		[0] = (u32)(mask & 0xffffffff),		\
		[1] = (u64)(mask) >> 32,		\
	}						\
}

#else /* 32bit long above, 64bit long below */

#define __mlog_test_u64(mask, bits)	((mask) & bits.words[0])
#define __mlog_set_u64(mask, bits) do {		\
	bits.words[0] |= (mask);		\
} while (0)
#define __mlog_clear_u64(mask, bits) do {	\
	bits.words[0] &= ~(mask);		\
} while (0)
#define MLOG_BITS_RHS(mask) { { (mask) } }

#endif

/*
 * smp_processor_id() "helpfully" screams when called outside preemptible
 * regions in current kernels.  sles doesn't have the variants that don't
 * scream.  just do this instead of trying to guess which we're building
 * against.. *sigh*.
 */
#define __mlog_cpu_guess ({		\
	unsigned long _cpu = get_cpu();	\
	put_cpu();			\
	_cpu;				\
})

/* In the following two macros, the whitespace after the ',' just
 * before ##args is intentional. Otherwise, gcc 2.95 will eat the
 * previous token if args expands to nothing.
 */
#define __mlog_printk(level, fmt, args...)				\
	printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm,		\
	       task_pid_nr(current), __mlog_cpu_guess,			\
	       __PRETTY_FUNCTION__, __LINE__ , ##args)

#define mlog(mask, fmt, args...) do {					\
	u64 __m = MLOG_MASK_PREFIX | (mask);				\
	if ((__m & ML_ALLOWED_BITS) &&					\
	    __mlog_test_u64(__m, mlog_and_bits) &&			\
	    !__mlog_test_u64(__m, mlog_not_bits)) {			\
		if (__m & ML_ERROR)					\
			__mlog_printk(KERN_ERR, "ERROR: "fmt , ##args);	\
		else if (__m & ML_NOTICE)				\
			__mlog_printk(KERN_NOTICE, fmt , ##args);	\
		else __mlog_printk(KERN_INFO, fmt , ##args);		\
	}								\
} while (0)

#define mlog_errno(st) do {						\
	int _st = (st);							\
	if (_st != -ERESTARTSYS && _st != -EINTR &&			\
	    _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC)		\
		mlog(ML_ERROR, "status = %lld\n", (long long)_st);	\
} while (0)

#define mlog_bug_on_msg(cond, fmt, args...) do {			\
	if (cond) {							\
		mlog(ML_ERROR, "bug expression: " #cond "\n");		\
		mlog(ML_ERROR, fmt, ##args);				\
		BUG();							\
	}								\
} while (0)

#include <linux/kobject.h>
#include <linux/sysfs.h>
int mlog_sys_init(struct kset *o2cb_subsys);
void mlog_sys_shutdown(void);

#endif /* O2CLUSTER_MASKLOG_H */
@@ -1,579 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * netdebug.c
 *
 * debug functionality for o2net
 *
 * Copyright (C) 2005, 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifdef CONFIG_DEBUG_FS

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <linux/uaccess.h>

#include "tcp.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_TCP
#include "masklog.h"

#include "tcp_internal.h"

#define O2NET_DEBUG_DIR		"o2net"
#define SC_DEBUG_NAME		"sock_containers"
#define NST_DEBUG_NAME		"send_tracking"
#define STATS_DEBUG_NAME	"stats"
#define NODES_DEBUG_NAME	"connected_nodes"

#define SHOW_SOCK_CONTAINERS	0
#define SHOW_SOCK_STATS		1

static struct dentry *o2net_dentry;
static struct dentry *sc_dentry;
static struct dentry *nst_dentry;
static struct dentry *stats_dentry;
static struct dentry *nodes_dentry;

static DEFINE_SPINLOCK(o2net_debug_lock);

static LIST_HEAD(sock_containers);
static LIST_HEAD(send_tracking);

void o2net_debug_add_nst(struct o2net_send_tracking *nst)
{
	spin_lock(&o2net_debug_lock);
	list_add(&nst->st_net_debug_item, &send_tracking);
	spin_unlock(&o2net_debug_lock);
}

void o2net_debug_del_nst(struct o2net_send_tracking *nst)
{
	spin_lock(&o2net_debug_lock);
	if (!list_empty(&nst->st_net_debug_item))
		list_del_init(&nst->st_net_debug_item);
	spin_unlock(&o2net_debug_lock);
}

static struct o2net_send_tracking
			*next_nst(struct o2net_send_tracking *nst_start)
{
	struct o2net_send_tracking *nst, *ret = NULL;

	assert_spin_locked(&o2net_debug_lock);

	list_for_each_entry(nst, &nst_start->st_net_debug_item,
			    st_net_debug_item) {
		/* discover the head of the list */
		if (&nst->st_net_debug_item == &send_tracking)
			break;

		/* use st_task to detect real nsts in the list */
		if (nst->st_task != NULL) {
			ret = nst;
			break;
		}
	}

	return ret;
}

static void *nst_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;

	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	spin_unlock(&o2net_debug_lock);

	return nst;
}

static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;

	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	list_del_init(&dummy_nst->st_net_debug_item);
	if (nst)
		list_add(&dummy_nst->st_net_debug_item,
			 &nst->st_net_debug_item);
	spin_unlock(&o2net_debug_lock);

	return nst; /* unused, just needs to be null when done */
}

static int nst_seq_show(struct seq_file *seq, void *v)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
	ktime_t now;
	s64 sock, send, status;

	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	if (!nst)
		goto out;

	now = ktime_get();
	sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
	send = ktime_to_us(ktime_sub(now, nst->st_send_time));
	status = ktime_to_us(ktime_sub(now, nst->st_status_time));

	/* get_task_comm isn't exported.  oh well. */
	seq_printf(seq, "%p:\n"
		   "  pid:          %lu\n"
		   "  tgid:         %lu\n"
		   "  process name: %s\n"
		   "  node:         %u\n"
		   "  sc:           %p\n"
		   "  message id:   %d\n"
		   "  message type: %u\n"
		   "  message key:  0x%08x\n"
		   "  sock acquiry: %lld usecs ago\n"
		   "  send start:   %lld usecs ago\n"
		   "  wait start:   %lld usecs ago\n",
		   nst, (unsigned long)task_pid_nr(nst->st_task),
		   (unsigned long)nst->st_task->tgid,
		   nst->st_task->comm, nst->st_node,
		   nst->st_sc, nst->st_id, nst->st_msg_type,
		   nst->st_msg_key,
		   (long long)sock,
		   (long long)send,
		   (long long)status);

out:
	spin_unlock(&o2net_debug_lock);

	return 0;
}

static void nst_seq_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations nst_seq_ops = {
	.start = nst_seq_start,
	.next = nst_seq_next,
	.stop = nst_seq_stop,
	.show = nst_seq_show,
};

static int nst_fop_open(struct inode *inode, struct file *file)
{
	struct o2net_send_tracking *dummy_nst;
	struct seq_file *seq;
	int ret;

	dummy_nst = kmalloc(sizeof(struct o2net_send_tracking), GFP_KERNEL);
	if (dummy_nst == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	dummy_nst->st_task = NULL;

	ret = seq_open(file, &nst_seq_ops);
	if (ret)
		goto out;

	seq = file->private_data;
	seq->private = dummy_nst;
	o2net_debug_add_nst(dummy_nst);

	dummy_nst = NULL;

out:
	kfree(dummy_nst);
	return ret;
}

static int nst_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct o2net_send_tracking *dummy_nst = seq->private;

	o2net_debug_del_nst(dummy_nst);
	return seq_release_private(inode, file);
}

static const struct file_operations nst_seq_fops = {
	.open = nst_fop_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = nst_fop_release,
};

void o2net_debug_add_sc(struct o2net_sock_container *sc)
{
	spin_lock(&o2net_debug_lock);
	list_add(&sc->sc_net_debug_item, &sock_containers);
	spin_unlock(&o2net_debug_lock);
}

void o2net_debug_del_sc(struct o2net_sock_container *sc)
{
	spin_lock(&o2net_debug_lock);
	list_del_init(&sc->sc_net_debug_item);
	spin_unlock(&o2net_debug_lock);
}

struct o2net_sock_debug {
	int dbg_ctxt;
	struct o2net_sock_container *dbg_sock;
};

static struct o2net_sock_container
			*next_sc(struct o2net_sock_container *sc_start)
{
	struct o2net_sock_container *sc, *ret = NULL;

	assert_spin_locked(&o2net_debug_lock);

	list_for_each_entry(sc, &sc_start->sc_net_debug_item,
			    sc_net_debug_item) {
		/* discover the head of the list miscast as a sc */
		if (&sc->sc_net_debug_item == &sock_containers)
			break;

		/* use sc_page to detect real scs in the list */
		if (sc->sc_page != NULL) {
			ret = sc;
			break;
		}
	}

	return ret;
}

static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct o2net_sock_debug *sd = seq->private;
	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;

	spin_lock(&o2net_debug_lock);
	sc = next_sc(dummy_sc);
	spin_unlock(&o2net_debug_lock);

	return sc;
}

static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct o2net_sock_debug *sd = seq->private;
	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;

	spin_lock(&o2net_debug_lock);
	sc = next_sc(dummy_sc);
	list_del_init(&dummy_sc->sc_net_debug_item);
	if (sc)
		list_add(&dummy_sc->sc_net_debug_item, &sc->sc_net_debug_item);
	spin_unlock(&o2net_debug_lock);

	return sc; /* unused, just needs to be null when done */
}

#ifdef CONFIG_OCFS2_FS_STATS
# define sc_send_count(_s)		((_s)->sc_send_count)
# define sc_recv_count(_s)		((_s)->sc_recv_count)
# define sc_tv_acquiry_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_acquiry_total))
# define sc_tv_send_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_send_total))
# define sc_tv_status_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_status_total))
# define sc_tv_process_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_process_total))
#else
# define sc_send_count(_s)		(0U)
# define sc_recv_count(_s)		(0U)
# define sc_tv_acquiry_total_ns(_s)	(0LL)
# define sc_tv_send_total_ns(_s)	(0LL)
# define sc_tv_status_total_ns(_s)	(0LL)
# define sc_tv_process_total_ns(_s)	(0LL)
#endif

/* So that debugfs.ocfs2 can determine which format is being used */
#define O2NET_STATS_STR_VERSION		1
static void sc_show_sock_stats(struct seq_file *seq,
			       struct o2net_sock_container *sc)
{
	if (!sc)
		return;

	seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION,
		   sc->sc_node->nd_num, (unsigned long)sc_send_count(sc),
		   (long long)sc_tv_acquiry_total_ns(sc),
		   (long long)sc_tv_send_total_ns(sc),
		   (long long)sc_tv_status_total_ns(sc),
		   (unsigned long)sc_recv_count(sc),
		   (long long)sc_tv_process_total_ns(sc));
}

static void sc_show_sock_container(struct seq_file *seq,
				   struct o2net_sock_container *sc)
{
	struct inet_sock *inet = NULL;
	__be32 saddr = 0, daddr = 0;
	__be16 sport = 0, dport = 0;

	if (!sc)
		return;

	if (sc->sc_sock) {
		inet = inet_sk(sc->sc_sock->sk);
		/* the stack's structs aren't sparse endian clean */
		saddr = (__force __be32)inet->inet_saddr;
		daddr = (__force __be32)inet->inet_daddr;
		sport = (__force __be16)inet->inet_sport;
		dport = (__force __be16)inet->inet_dport;
	}

	/* XXX sigh, inet-> doesn't have sparse annotation so any
	 * use of it here generates a warning with -Wbitwise */
	seq_printf(seq, "%p:\n"
		   "  krefs:           %d\n"
		   "  sock:            %pI4:%u -> "
				      "%pI4:%u\n"
		   "  remote node:     %s\n"
		   "  page off:        %zu\n"
		   "  handshake ok:    %u\n"
		   "  timer:           %lld usecs\n"
		   "  data ready:      %lld usecs\n"
		   "  advance start:   %lld usecs\n"
		   "  advance stop:    %lld usecs\n"
		   "  func start:      %lld usecs\n"
		   "  func stop:       %lld usecs\n"
		   "  func key:        0x%08x\n"
		   "  func type:       %u\n",
		   sc,
		   atomic_read(&sc->sc_kref.refcount),
		   &saddr, inet ? ntohs(sport) : 0,
		   &daddr, inet ? ntohs(dport) : 0,
		   sc->sc_node->nd_name,
		   sc->sc_page_off,
		   sc->sc_handshake_ok,
		   (long long)ktime_to_us(sc->sc_tv_timer),
		   (long long)ktime_to_us(sc->sc_tv_data_ready),
		   (long long)ktime_to_us(sc->sc_tv_advance_start),
		   (long long)ktime_to_us(sc->sc_tv_advance_stop),
		   (long long)ktime_to_us(sc->sc_tv_func_start),
		   (long long)ktime_to_us(sc->sc_tv_func_stop),
		   sc->sc_msg_key,
		   sc->sc_msg_type);
}

static int sc_seq_show(struct seq_file *seq, void *v)
{
	struct o2net_sock_debug *sd = seq->private;
	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;

	spin_lock(&o2net_debug_lock);
	sc = next_sc(dummy_sc);

	if (sc) {
		if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS)
			sc_show_sock_container(seq, sc);
		else
			sc_show_sock_stats(seq, sc);
	}

	spin_unlock(&o2net_debug_lock);

	return 0;
}

static void sc_seq_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations sc_seq_ops = {
	.start = sc_seq_start,
	.next = sc_seq_next,
	.stop = sc_seq_stop,
	.show = sc_seq_show,
};

static int sc_common_open(struct file *file, struct o2net_sock_debug *sd)
{
	struct o2net_sock_container *dummy_sc;
	struct seq_file *seq;
	int ret;

	dummy_sc = kmalloc(sizeof(struct o2net_sock_container), GFP_KERNEL);
	if (dummy_sc == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	dummy_sc->sc_page = NULL;

	ret = seq_open(file, &sc_seq_ops);
	if (ret)
		goto out;

	seq = file->private_data;
	seq->private = sd;
	sd->dbg_sock = dummy_sc;
	o2net_debug_add_sc(dummy_sc);

	dummy_sc = NULL;

out:
	kfree(dummy_sc);
	return ret;
}

static int sc_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct o2net_sock_debug *sd = seq->private;
	struct o2net_sock_container *dummy_sc = sd->dbg_sock;

	o2net_debug_del_sc(dummy_sc);
	return seq_release_private(inode, file);
}

static int stats_fop_open(struct inode *inode, struct file *file)
{
	struct o2net_sock_debug *sd;

	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
	if (sd == NULL)
		return -ENOMEM;

	sd->dbg_ctxt = SHOW_SOCK_STATS;
	sd->dbg_sock = NULL;

	return sc_common_open(file, sd);
}

static const struct file_operations stats_seq_fops = {
	.open = stats_fop_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = sc_fop_release,
};

static int sc_fop_open(struct inode *inode, struct file *file)
{
	struct o2net_sock_debug *sd;

	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
	if (sd == NULL)
		return -ENOMEM;

	sd->dbg_ctxt = SHOW_SOCK_CONTAINERS;
	sd->dbg_sock = NULL;

	return sc_common_open(file, sd);
}

static const struct file_operations sc_seq_fops = {
	.open = sc_fop_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = sc_fop_release,
};

static int o2net_fill_bitmap(char *buf, int len)
{
	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int i = -1, out = 0;

	o2net_fill_node_map(map, sizeof(map));

	while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
		out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
	out += snprintf(buf + out, PAGE_SIZE - out, "\n");

	return out;
}

static int nodes_fop_open(struct inode *inode, struct file *file)
{
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));

	file->private_data = buf;

	return 0;
}

static int o2net_debug_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t o2net_debug_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
				       i_size_read(file->f_mapping->host));
}

static const struct file_operations nodes_fops = {
	.open		= nodes_fop_open,
	.release	= o2net_debug_release,
	.read		= o2net_debug_read,
	.llseek		= generic_file_llseek,
};

void o2net_debugfs_exit(void)
{
	debugfs_remove(nodes_dentry);
	debugfs_remove(stats_dentry);
	debugfs_remove(sc_dentry);
	debugfs_remove(nst_dentry);
	debugfs_remove(o2net_dentry);
}

int o2net_debugfs_init(void)
{
	mode_t mode = S_IFREG|S_IRUSR;

	o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
	if (o2net_dentry)
		nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
					o2net_dentry, NULL, &nst_seq_fops);
	if (nst_dentry)
		sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
					o2net_dentry, NULL, &sc_seq_fops);
	if (sc_dentry)
		stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
					o2net_dentry, NULL, &stats_seq_fops);
	if (stats_dentry)
		nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
					o2net_dentry, NULL, &nodes_fops);
	if (nodes_dentry)
		return 0;

	o2net_debugfs_exit();
	mlog_errno(-ENOMEM);
	return -ENOMEM;
}

#endif	/* CONFIG_DEBUG_FS */
@@ -1,989 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>

#include "tcp.h"
#include "nodemanager.h"
#include "heartbeat.h"
#include "masklog.h"
#include "sys.h"
#include "ver.h"

/* for now we operate under the assertion that there can be only one
 * cluster active at a time.  Changing this will require trickling
 * cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;

char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
	"reset",	/* O2NM_FENCE_RESET */
	"panic",	/* O2NM_FENCE_PANIC */
};

struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
	struct o2nm_node *node = NULL;

	if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
		goto out;

	read_lock(&o2nm_single_cluster->cl_nodes_lock);
	node = o2nm_single_cluster->cl_nodes[node_num];
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&o2nm_single_cluster->cl_nodes_lock);
out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);

int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));

	if (cluster == NULL)
		return -EINVAL;

	read_lock(&cluster->cl_nodes_lock);
	memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
	read_unlock(&cluster->cl_nodes_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);

static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
						  __be32 ip_needle,
						  struct rb_node ***ret_p,
						  struct rb_node **ret_parent)
{
	struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
	struct rb_node *parent = NULL;
	struct o2nm_node *node, *ret = NULL;

	while (*p) {
		int cmp;

		parent = *p;
		node = rb_entry(parent, struct o2nm_node, nd_ip_node);

		cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
				sizeof(ip_needle));
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else {
			ret = node;
			break;
		}
	}

	if (ret_p != NULL)
		*ret_p = p;
	if (ret_parent != NULL)
		*ret_parent = parent;

	return ret;
}

struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
{
	struct o2nm_node *node = NULL;
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	if (cluster == NULL)
		goto out;

	read_lock(&cluster->cl_nodes_lock);
	node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&cluster->cl_nodes_lock);

out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);

void o2nm_node_put(struct o2nm_node *node)
{
	config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);

void o2nm_node_get(struct o2nm_node *node)
{
	config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);

u8 o2nm_this_node(void)
{
	u8 node_num = O2NM_MAX_NODES;

	if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
		node_num = o2nm_single_cluster->cl_local_node;

	return node_num;
}
EXPORT_SYMBOL_GPL(o2nm_this_node);

/* node configfs bits */

static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
	return item ?
		container_of(to_config_group(item), struct o2nm_cluster,
			     cl_group)
		: NULL;
}

static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
	return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}

static void o2nm_node_release(struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	kfree(node);
}

static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%d\n", node->nd_num);
}

static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
	/* through the first node_set .parent
	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
	return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
}

enum {
	O2NM_NODE_ATTR_NUM = 0,
	O2NM_NODE_ATTR_PORT,
	O2NM_NODE_ATTR_ADDRESS,
	O2NM_NODE_ATTR_LOCAL,
};

static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
				   size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp >= O2NM_MAX_NODES)
		return -ERANGE;

	/* once we're in the cl_nodes tree networking can look us up by
	 * node number and try to use our address and port attributes
	 * to connect to this node.. make sure that they've been set
	 * before writing the node attribute? */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	write_lock(&cluster->cl_nodes_lock);
	if (cluster->cl_nodes[tmp])
		p = NULL;
	else {
		cluster->cl_nodes[tmp] = node;
		node->nd_num = tmp;
		set_bit(tmp, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);
	if (p == NULL)
		return -EEXIST;

	return count;
}
static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
}

static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node,
					 const char *page, size_t count)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u16)-1)
		return -ERANGE;

	node->nd_ipv4_port = htons(tmp);

	return count;
}

static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%pI4\n", &node->nd_ipv4_address);
}

static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
					    const char *page,
					    size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	int ret, i;
	struct rb_node **p, *parent;
	unsigned int octets[4];
	__be32 ipv4_addr = 0;

	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
		     &octets[1], &octets[0]);
	if (ret != 4)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(octets); i++) {
		if (octets[i] > 255)
			return -ERANGE;
		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
	}

	ret = 0;
	write_lock(&cluster->cl_nodes_lock);
	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
		ret = -EEXIST;
	else {
		rb_link_node(&node->nd_ip_node, parent, p);
		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
	}
	write_unlock(&cluster->cl_nodes_lock);
	if (ret)
		return ret;

	memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));

	return count;
}

static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%d\n", node->nd_local);
}

static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
				     size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	unsigned long tmp;
	char *p = (char *)page;
	ssize_t ret;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	tmp = !!tmp; /* boolean of whether this node wants to be local */

	/* setting local turns on networking rx for now so we require having
	 * set everything else first */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	/* the only failure case is trying to set a new local node
	 * when a different one is already set */
	if (tmp && tmp == cluster->cl_has_local &&
	    cluster->cl_local_node != node->nd_num)
		return -EBUSY;

	/* bring up the rx thread if we're setting the new local node. */
	if (tmp && !cluster->cl_has_local) {
		ret = o2net_start_listening(node);
		if (ret)
			return ret;
	}

	if (!tmp && cluster->cl_has_local &&
	    cluster->cl_local_node == node->nd_num) {
		o2net_stop_listening(node);
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
	}

	node->nd_local = tmp;
	if (node->nd_local) {
		cluster->cl_has_local = tmp;
		cluster->cl_local_node = node->nd_num;
	}

	return count;
}

struct o2nm_node_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2nm_node *, char *);
	ssize_t (*store)(struct o2nm_node *, const char *, size_t);
};

static struct o2nm_node_attribute o2nm_node_attr_num = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "num",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_num_read,
	.store	= o2nm_node_num_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "ipv4_port",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_ipv4_port_read,
	.store	= o2nm_node_ipv4_port_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "ipv4_address",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_ipv4_address_read,
	.store	= o2nm_node_ipv4_address_write,
};

static struct o2nm_node_attribute o2nm_node_attr_local = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "local",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_local_read,
	.store	= o2nm_node_local_write,
};

static struct configfs_attribute *o2nm_node_attrs[] = {
	[O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr,
	[O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr,
	[O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr,
	[O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr,
	NULL,
};

static int o2nm_attr_index(struct configfs_attribute *attr)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) {
		if (attr == o2nm_node_attrs[i])
			return i;
	}
	BUG();
	return 0;
}

static ssize_t o2nm_node_show(struct config_item *item,
			      struct configfs_attribute *attr,
			      char *page)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_node_attribute *o2nm_node_attr =
		container_of(attr, struct o2nm_node_attribute, attr);
	ssize_t ret = 0;

	if (o2nm_node_attr->show)
		ret = o2nm_node_attr->show(node, page);
	return ret;
}

static ssize_t o2nm_node_store(struct config_item *item,
			       struct configfs_attribute *attr,
			       const char *page, size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_node_attribute *o2nm_node_attr =
		container_of(attr, struct o2nm_node_attribute, attr);
	ssize_t ret;
	int attr_index = o2nm_attr_index(attr);

	if (o2nm_node_attr->store == NULL) {
		ret = -EINVAL;
		goto out;
	}

	if (test_bit(attr_index, &node->nd_set_attributes))
		return -EBUSY;

	ret = o2nm_node_attr->store(node, page, count);
	if (ret < count)
		goto out;

	set_bit(attr_index, &node->nd_set_attributes);
out:
	return ret;
}

static struct configfs_item_operations o2nm_node_item_ops = {
	.release		= o2nm_node_release,
	.show_attribute		= o2nm_node_show,
	.store_attribute	= o2nm_node_store,
};

static struct config_item_type o2nm_node_type = {
	.ct_item_ops	= &o2nm_node_item_ops,
	.ct_attrs	= o2nm_node_attrs,
	.ct_owner	= THIS_MODULE,
};

/* node set */

struct o2nm_node_group {
	struct config_group ns_group;
	/* some stuff? */
};

#if 0
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2nm_node_group, ns_group)
		: NULL;
}
#endif

struct o2nm_cluster_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2nm_cluster *, char *);
	ssize_t (*store)(struct o2nm_cluster *, const char *, size_t);
};

static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
				       unsigned int *val)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u32)-1)
		return -ERANGE;

	*val = tmp;

	return count;
}

static ssize_t o2nm_cluster_attr_idle_timeout_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
}

static ssize_t o2nm_cluster_attr_idle_timeout_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	ssize_t ret;
	unsigned int val;

	ret = o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_idle_timeout_ms != val
			&& o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change idle timeout after "
			     "the first peer has agreed to it."
			     "  %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val <= cluster->cl_keepalive_delay_ms) {
			mlog(ML_NOTICE, "o2net: idle timeout must be larger "
			     "than keepalive delay\n");
			ret = -EINVAL;
		} else {
			cluster->cl_idle_timeout_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
}

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	ssize_t ret;
	unsigned int val;

	ret = o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_keepalive_delay_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change keepalive delay after"
			     " the first peer has agreed to it."
			     "  %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val >= cluster->cl_idle_timeout_ms) {
			mlog(ML_NOTICE, "o2net: keepalive delay must be "
			     "smaller than idle timeout\n");
			ret = -EINVAL;
		} else {
			cluster->cl_keepalive_delay_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	return o2nm_cluster_attr_write(page, count,
				       &cluster->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_attr_fence_method_read(
	struct o2nm_cluster *cluster, char *page)
{
	ssize_t ret = 0;

	if (cluster)
		ret = sprintf(page, "%s\n",
			      o2nm_fence_method_desc[cluster->cl_fence_method]);
	return ret;
}

static ssize_t o2nm_cluster_attr_fence_method_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	unsigned int i;

	if (page[count - 1] != '\n')
		goto bail;

	for (i = 0; i < O2NM_FENCE_METHODS; ++i) {
		if (count != strlen(o2nm_fence_method_desc[i]) + 1)
			continue;
		if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1))
			continue;
		if (cluster->cl_fence_method != i) {
			printk(KERN_INFO "ocfs2: Changing fence method to %s\n",
			       o2nm_fence_method_desc[i]);
			cluster->cl_fence_method = i;
		}
		return count;
	}

bail:
	return -EINVAL;
}

static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "idle_timeout_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_idle_timeout_ms_read,
	.store	= o2nm_cluster_attr_idle_timeout_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_keepalive_delay_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "keepalive_delay_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_keepalive_delay_ms_read,
	.store	= o2nm_cluster_attr_keepalive_delay_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "reconnect_delay_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_reconnect_delay_ms_read,
	.store	= o2nm_cluster_attr_reconnect_delay_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_fence_method = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "fence_method",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_fence_method_read,
	.store	= o2nm_cluster_attr_fence_method_write,
};

static struct configfs_attribute *o2nm_cluster_attrs[] = {
	&o2nm_cluster_attr_idle_timeout_ms.attr,
	&o2nm_cluster_attr_keepalive_delay_ms.attr,
	&o2nm_cluster_attr_reconnect_delay_ms.attr,
	&o2nm_cluster_attr_fence_method.attr,
	NULL,
};
static ssize_t o2nm_cluster_show(struct config_item *item,
				 struct configfs_attribute *attr,
				 char *page)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	struct o2nm_cluster_attribute *o2nm_cluster_attr =
		container_of(attr, struct o2nm_cluster_attribute, attr);
	ssize_t ret = 0;

	if (o2nm_cluster_attr->show)
		ret = o2nm_cluster_attr->show(cluster, page);
	return ret;
}

static ssize_t o2nm_cluster_store(struct config_item *item,
				  struct configfs_attribute *attr,
				  const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	struct o2nm_cluster_attribute *o2nm_cluster_attr =
		container_of(attr, struct o2nm_cluster_attribute, attr);
	ssize_t ret;

	if (o2nm_cluster_attr->store == NULL) {
		ret = -EINVAL;
		goto out;
	}

	ret = o2nm_cluster_attr->store(cluster, page, count);
	if (ret < count)
		goto out;
out:
	return ret;
}

static struct config_item *o2nm_node_group_make_item(struct config_group *group,
						     const char *name)
{
	struct o2nm_node *node = NULL;

	if (strlen(name) > O2NM_MAX_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
	if (node == NULL)
		return ERR_PTR(-ENOMEM);

	strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
	config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
	spin_lock_init(&node->nd_lock);
spin_lock_init(&node->nd_lock);
|
||||
|
||||
mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name);
|
||||
|
||||
return &node->nd_item;
|
||||
}
|
||||
|
||||
static void o2nm_node_group_drop_item(struct config_group *group,
|
||||
struct config_item *item)
|
||||
{
|
||||
struct o2nm_node *node = to_o2nm_node(item);
|
||||
struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
|
||||
|
||||
o2net_disconnect_node(node);
|
||||
|
||||
if (cluster->cl_has_local &&
|
||||
(cluster->cl_local_node == node->nd_num)) {
|
||||
cluster->cl_has_local = 0;
|
||||
cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
|
||||
o2net_stop_listening(node);
|
||||
}
|
||||
|
||||
/* XXX call into net to stop this node from trading messages */
|
||||
|
||||
write_lock(&cluster->cl_nodes_lock);
|
||||
|
||||
/* XXX sloppy */
|
||||
if (node->nd_ipv4_address)
|
||||
rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
|
||||
|
||||
/* nd_num might be 0 if the node number hasn't been set.. */
|
||||
if (cluster->cl_nodes[node->nd_num] == node) {
|
||||
cluster->cl_nodes[node->nd_num] = NULL;
|
||||
clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
|
||||
}
|
||||
write_unlock(&cluster->cl_nodes_lock);
|
||||
|
||||
mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n",
|
||||
config_item_name(&node->nd_item));
|
||||
|
||||
config_item_put(item);
|
||||
}
|
||||
|
||||
static struct configfs_group_operations o2nm_node_group_group_ops = {
|
||||
.make_item = o2nm_node_group_make_item,
|
||||
.drop_item = o2nm_node_group_drop_item,
|
||||
};
|
||||
|
||||
static struct config_item_type o2nm_node_group_type = {
|
||||
.ct_group_ops = &o2nm_node_group_group_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/* cluster */
|
||||
|
||||
static void o2nm_cluster_release(struct config_item *item)
|
||||
{
|
||||
struct o2nm_cluster *cluster = to_o2nm_cluster(item);
|
||||
|
||||
kfree(cluster->cl_group.default_groups);
|
||||
kfree(cluster);
|
||||
}
|
||||
|
||||
static struct configfs_item_operations o2nm_cluster_item_ops = {
|
||||
.release = o2nm_cluster_release,
|
||||
.show_attribute = o2nm_cluster_show,
|
||||
.store_attribute = o2nm_cluster_store,
|
||||
};
|
||||
|
||||
static struct config_item_type o2nm_cluster_type = {
|
||||
.ct_item_ops = &o2nm_cluster_item_ops,
|
||||
.ct_attrs = o2nm_cluster_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/* cluster set */
|
||||
|
||||
struct o2nm_cluster_group {
|
||||
struct configfs_subsystem cs_subsys;
|
||||
/* some stuff? */
|
||||
};
|
||||
|
||||
#if 0
|
||||
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
|
||||
{
|
||||
return group ?
|
||||
container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
|
||||
: NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
|
||||
const char *name)
|
||||
{
|
||||
struct o2nm_cluster *cluster = NULL;
|
||||
struct o2nm_node_group *ns = NULL;
|
||||
struct config_group *o2hb_group = NULL, *ret = NULL;
|
||||
void *defs = NULL;
|
||||
|
||||
/* this runs under the parent dir's i_mutex; there can be only
|
||||
* one caller in here at a time */
|
||||
if (o2nm_single_cluster)
|
||||
return ERR_PTR(-ENOSPC);
|
||||
|
||||
cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
|
||||
ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
|
||||
defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
|
||||
o2hb_group = o2hb_alloc_hb_set();
|
||||
if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
|
||||
goto out;
|
||||
|
||||
config_group_init_type_name(&cluster->cl_group, name,
|
||||
&o2nm_cluster_type);
|
||||
config_group_init_type_name(&ns->ns_group, "node",
|
||||
&o2nm_node_group_type);
|
||||
|
||||
cluster->cl_group.default_groups = defs;
|
||||
cluster->cl_group.default_groups[0] = &ns->ns_group;
|
||||
cluster->cl_group.default_groups[1] = o2hb_group;
|
||||
cluster->cl_group.default_groups[2] = NULL;
|
||||
rwlock_init(&cluster->cl_nodes_lock);
|
||||
cluster->cl_node_ip_tree = RB_ROOT;
|
||||
cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
|
||||
cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
|
||||
cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
|
||||
cluster->cl_fence_method = O2NM_FENCE_RESET;
|
||||
|
||||
ret = &cluster->cl_group;
|
||||
o2nm_single_cluster = cluster;
|
||||
|
||||
out:
|
||||
if (ret == NULL) {
|
||||
kfree(cluster);
|
||||
kfree(ns);
|
||||
o2hb_free_hb_set(o2hb_group);
|
||||
kfree(defs);
|
||||
ret = ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
|
||||
{
|
||||
struct o2nm_cluster *cluster = to_o2nm_cluster(item);
|
||||
int i;
|
||||
struct config_item *killme;
|
||||
|
||||
BUG_ON(o2nm_single_cluster != cluster);
|
||||
o2nm_single_cluster = NULL;
|
||||
|
||||
for (i = 0; cluster->cl_group.default_groups[i]; i++) {
|
||||
killme = &cluster->cl_group.default_groups[i]->cg_item;
|
||||
cluster->cl_group.default_groups[i] = NULL;
|
||||
config_item_put(killme);
|
||||
}
|
||||
|
||||
config_item_put(item);
|
||||
}
|
||||
|
||||
static struct configfs_group_operations o2nm_cluster_group_group_ops = {
|
||||
.make_group = o2nm_cluster_group_make_group,
|
||||
.drop_item = o2nm_cluster_group_drop_item,
|
||||
};
|
||||
|
||||
static struct config_item_type o2nm_cluster_group_type = {
|
||||
.ct_group_ops = &o2nm_cluster_group_group_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct o2nm_cluster_group o2nm_cluster_group = {
|
||||
.cs_subsys = {
|
||||
.su_group = {
|
||||
.cg_item = {
|
||||
.ci_namebuf = "cluster",
|
||||
.ci_type = &o2nm_cluster_group_type,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
int o2nm_depend_item(struct config_item *item)
|
||||
{
|
||||
return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
|
||||
}
|
||||
|
||||
void o2nm_undepend_item(struct config_item *item)
|
||||
{
|
||||
configfs_undepend_item(&o2nm_cluster_group.cs_subsys, item);
|
||||
}
|
||||
|
||||
int o2nm_depend_this_node(void)
|
||||
{
|
||||
int ret = 0;
|
||||
struct o2nm_node *local_node;
|
||||
|
||||
local_node = o2nm_get_node_by_num(o2nm_this_node());
|
||||
if (!local_node) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = o2nm_depend_item(&local_node->nd_item);
|
||||
o2nm_node_put(local_node);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void o2nm_undepend_this_node(void)
|
||||
{
|
||||
struct o2nm_node *local_node;
|
||||
|
||||
local_node = o2nm_get_node_by_num(o2nm_this_node());
|
||||
BUG_ON(!local_node);
|
||||
|
||||
o2nm_undepend_item(&local_node->nd_item);
|
||||
o2nm_node_put(local_node);
|
||||
}
|
||||
|
||||
|
||||
static void __exit exit_o2nm(void)
|
||||
{
|
||||
/* XXX sync with hb callbacks and shut down hb? */
|
||||
o2net_unregister_hb_callbacks();
|
||||
configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
|
||||
o2cb_sys_shutdown();
|
||||
|
||||
o2net_exit();
|
||||
o2hb_exit();
|
||||
}
|
||||
|
||||
static int __init init_o2nm(void)
|
||||
{
|
||||
int ret = -1;
|
||||
|
||||
cluster_print_version();
|
||||
|
||||
ret = o2hb_init();
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = o2net_init();
|
||||
if (ret)
|
||||
goto out_o2hb;
|
||||
|
||||
ret = o2net_register_hb_callbacks();
|
||||
if (ret)
|
||||
goto out_o2net;
|
||||
|
||||
config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
|
||||
mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
|
||||
ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
|
||||
goto out_callbacks;
|
||||
}
|
||||
|
||||
ret = o2cb_sys_init();
|
||||
if (!ret)
|
||||
goto out;
|
||||
|
||||
configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
|
||||
out_callbacks:
|
||||
o2net_unregister_hb_callbacks();
|
||||
out_o2net:
|
||||
o2net_exit();
|
||||
out_o2hb:
|
||||
o2hb_exit();
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
MODULE_AUTHOR("Oracle");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
module_init(init_o2nm)
|
||||
module_exit(exit_o2nm)
|
|
@ -1,88 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * nodemanager.h
 *
 * Function prototypes
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef O2CLUSTER_NODEMANAGER_H
#define O2CLUSTER_NODEMANAGER_H

#include "ocfs2_nodemanager.h"

/* This totally doesn't belong here. */
#include <linux/configfs.h>
#include <linux/rbtree.h>

enum o2nm_fence_method {
	O2NM_FENCE_RESET = 0,
	O2NM_FENCE_PANIC,
	O2NM_FENCE_METHODS,	/* Number of fence methods */
};

struct o2nm_node {
	spinlock_t		nd_lock;
	struct config_item	nd_item;
	char			nd_name[O2NM_MAX_NAME_LEN+1]; /* replace? */
	__u8			nd_num;
	/* only one address per node, as attributes, for now. */
	__be32			nd_ipv4_address;
	__be16			nd_ipv4_port;
	struct rb_node		nd_ip_node;
	/* there can be only one local node for now */
	int			nd_local;

	unsigned long		nd_set_attributes;
};

struct o2nm_cluster {
	struct config_group	cl_group;
	unsigned		cl_has_local:1;
	u8			cl_local_node;
	rwlock_t		cl_nodes_lock;
	struct o2nm_node	*cl_nodes[O2NM_MAX_NODES];
	struct rb_root		cl_node_ip_tree;
	unsigned int		cl_idle_timeout_ms;
	unsigned int		cl_keepalive_delay_ms;
	unsigned int		cl_reconnect_delay_ms;
	enum o2nm_fence_method	cl_fence_method;

	/* this bitmap is part of a hack for disk bitmap.. will go eventually. - zab */
	unsigned long	cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

extern struct o2nm_cluster *o2nm_single_cluster;

u8 o2nm_this_node(void);

int o2nm_configured_node_map(unsigned long *map, unsigned bytes);
struct o2nm_node *o2nm_get_node_by_num(u8 node_num);
struct o2nm_node *o2nm_get_node_by_ip(__be32 addr);
void o2nm_node_get(struct o2nm_node *node);
void o2nm_node_put(struct o2nm_node *node);

int o2nm_depend_item(struct config_item *item);
void o2nm_undepend_item(struct config_item *item);
int o2nm_depend_this_node(void);
void o2nm_undepend_this_node(void);

#endif /* O2CLUSTER_NODEMANAGER_H */
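/*
 * Illustrative sketch, not part of the original header: one way a
 * caller could walk the nodes configured in the structures declared
 * above. The function name is hypothetical; for_each_set_bit() comes
 * from <linux/bitops.h>.
 */
static inline void example_walk_configured_nodes(void)
{
	struct o2nm_cluster *cl = o2nm_single_cluster;
	int num;

	if (!cl)
		return;
	read_lock(&cl->cl_nodes_lock);
	for_each_set_bit(num, cl->cl_nodes_bitmap, O2NM_MAX_NODES)
		pr_info("o2nm: node %d is %s\n", num,
			cl->cl_nodes[num]->nd_name);
	read_unlock(&cl->cl_nodes_lock);
}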
@ -1,38 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * ocfs2_heartbeat.h
 *
 * On-disk structures for ocfs2_heartbeat
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef _OCFS2_HEARTBEAT_H
#define _OCFS2_HEARTBEAT_H

struct o2hb_disk_heartbeat_block {
	__le64 hb_seq;
	__u8  hb_node;
	__u8  hb_pad1[3];
	__le32 hb_cksum;
	__le64 hb_generation;
	__le32 hb_dead_ms;
};

#endif /* _OCFS2_HEARTBEAT_H */
@ -1,45 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * ocfs2_nodemanager.h
 *
 * Header describing the interface between userspace and the kernel
 * for the ocfs2_nodemanager module.
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef _OCFS2_NODEMANAGER_H
#define _OCFS2_NODEMANAGER_H

#define O2NM_API_VERSION	5

#define O2NM_MAX_NODES		255
#define O2NM_INVALID_NODE_NUM	255

/* host name, group name, cluster name all 64 bytes */
#define O2NM_MAX_NAME_LEN	64	// __NEW_UTS_LEN

/*
 * Maximum number of global heartbeat regions allowed.
 * **CAUTION** Changing this number will break dlm compatibility.
 */
#define O2NM_MAX_REGIONS	32

#endif /* _OCFS2_NODEMANAGER_H */
@ -1,331 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 *
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

/* This quorum hack is only here until we transition to some more rational
 * approach that is driven from userspace. Honest. No foolin'.
 *
 * Imagine two nodes lose network connectivity to each other but they're still
 * up and operating in every other way. Presumably a network timeout indicates
 * that a node is broken and should be recovered. They can't both recover each
 * other and both carry on without serialising their access to the file system.
 * They need to decide who is authoritative. Now extend that problem to
 * arbitrary groups of nodes losing connectivity between each other.
 *
 * So we declare that a node which has given up on connecting to a majority
 * of nodes who are still heartbeating will fence itself.
 *
 * There are huge opportunities for races here. After we give up on a node's
 * connection we need to wait long enough to give heartbeat an opportunity
 * to declare the node as truly dead. We also need to be careful with the
 * race between when we see a node start heartbeating and when we connect
 * to it.
 *
 * So nodes that are in this transition put a hold on the quorum decision
 * with a counter. As they fall out of this transition they drop the count
 * and if they're the last, they fire off the decision.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>

#include "heartbeat.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_QUORUM
#include "masklog.h"
#include "quorum.h"

static struct o2quo_state {
	spinlock_t		qs_lock;
	struct work_struct	qs_work;
	int			qs_pending;
	int			qs_heartbeating;
	unsigned long		qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int			qs_connected;
	unsigned long		qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int			qs_holds;
	unsigned long		qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
} o2quo_state;

/* this is horribly heavy-handed. It should instead flip the file
 * system RO and call some userspace script. */
static void o2quo_fence_self(void)
{
	/* panic spins with interrupts enabled. with preempt
	 * threads can still schedule, etc, etc */
	o2hb_stop_all_regions();

	switch (o2nm_single_cluster->cl_fence_method) {
	case O2NM_FENCE_PANIC:
		panic("*** ocfs2 is very sorry to be fencing this system by "
		      "panicking ***\n");
		break;
	default:
		WARN_ON(o2nm_single_cluster->cl_fence_method >=
			O2NM_FENCE_METHODS);
	case O2NM_FENCE_RESET:
		printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this "
		       "system by restarting ***\n");
		emergency_restart();
		break;
	}
}

/* Indicate that a timeout occurred on a heartbeat region write. The
 * other nodes in the cluster may consider us dead at that time so we
 * want to "fence" ourselves so that we don't scribble on the disk
 * after they think they've recovered us. This can't solve all
 * problems related to writeout after recovery but this hack can at
 * least close some of those gaps. When we have real fencing, this can
 * go away as our node would be fenced externally before other nodes
 * begin recovery. */
void o2quo_disk_timeout(void)
{
	o2quo_fence_self();
}

static void o2quo_make_decision(struct work_struct *work)
{
	int quorum;
	int lowest_hb, lowest_reachable = 0, fence = 0;
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
	if (lowest_hb != O2NM_MAX_NODES)
		lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);

	mlog(0, "heartbeating: %d, connected: %d, "
	     "lowest: %d (%sreachable)\n", qs->qs_heartbeating,
	     qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");

	if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
	    qs->qs_heartbeating == 1)
		goto out;

	if (qs->qs_heartbeating & 1) {
		/* the odd numbered cluster case is straightforward --
		 * if we can't talk to the majority we're hosed */
		quorum = (qs->qs_heartbeating + 1)/2;
		if (qs->qs_connected < quorum) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "only connected to %u nodes and %u is needed "
			     "to make a quorum out of %u heartbeating nodes\n",
			     qs->qs_connected, quorum,
			     qs->qs_heartbeating);
			fence = 1;
		}
	} else {
		/* the even numbered cluster adds the possibility of each half
		 * of the cluster being able to talk amongst themselves.. in
		 * that case we're hosed if we can't talk to the group that has
		 * the lowest numbered node */
		quorum = qs->qs_heartbeating / 2;
		if (qs->qs_connected < quorum) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "only connected to %u nodes and %u is needed "
			     "to make a quorum out of %u heartbeating nodes\n",
			     qs->qs_connected, quorum,
			     qs->qs_heartbeating);
			fence = 1;
		}
		else if ((qs->qs_connected == quorum) &&
			 !lowest_reachable) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "connected to a half-quorum of %u out of %u "
			     "nodes which doesn't include the lowest active "
			     "node %u\n", quorum, qs->qs_heartbeating,
			     lowest_hb);
			fence = 1;
		}
	}

out:
	spin_unlock(&qs->qs_lock);
	if (fence)
		o2quo_fence_self();
}

static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
{
	assert_spin_locked(&qs->qs_lock);

	if (!test_and_set_bit(node, qs->qs_hold_bm)) {
		qs->qs_holds++;
		mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
				"node %u\n", node);
		mlog(0, "node %u, %d total\n", node, qs->qs_holds);
	}
}

static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
{
	assert_spin_locked(&qs->qs_lock);

	if (test_and_clear_bit(node, qs->qs_hold_bm)) {
		mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
		if (--qs->qs_holds == 0) {
			if (qs->qs_pending) {
				qs->qs_pending = 0;
				schedule_work(&qs->qs_work);
			}
		}
		mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
				node, qs->qs_holds);
	}
}

/* as a node comes up we delay the quorum decision until we know the fate of
 * the connection. the hold will be dropped in conn_up or hb_down. it might be
 * perpetuated by con_err until hb_down. if we already have a conn, we might
 * be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_heartbeating++;
	mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
			"node %u\n", node);
	mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
	set_bit(node, qs->qs_hb_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);

	if (!test_bit(node, qs->qs_conn_bm))
		o2quo_set_hold(qs, node);
	else
		o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* hb going down releases any holds we might have had due to this node from
 * conn_up, conn_err, or hb_up */
void o2quo_hb_down(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_heartbeating--;
	mlog_bug_on_msg(qs->qs_heartbeating < 0,
			"node %u, %d heartbeating\n",
			node, qs->qs_heartbeating);
	mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
	clear_bit(node, qs->qs_hb_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);

	o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* this tells us that we've decided that the node is still heartbeating
 * even though we've lost its conn. it must only be called after conn_err
 * and indicates that we must now make a quorum decision in the future,
 * though we might be doing so after waiting for holds to drain. Here
 * we'll be dropping the hold from conn_err. */
void o2quo_hb_still_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	mlog(0, "node %u\n", node);

	qs->qs_pending = 1;
	o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* This is analogous to hb_up. as a node's connection comes up we delay the
 * quorum decision until we see it heartbeating. the hold will be dropped in
 * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if
 * it's already heartbeating we might be dropping a hold that conn_up got.
 * */
void o2quo_conn_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_connected++;
	mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
			"node %u\n", node);
	mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
	set_bit(node, qs->qs_conn_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_connected);

	if (!test_bit(node, qs->qs_hb_bm))
		o2quo_set_hold(qs, node);
	else
		o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* we've decided that we won't ever be connecting to the node again. if it's
 * still heartbeating we grab a hold that will delay decisions until either the
 * node stops heartbeating from hb_down or the caller decides that the node is
 * still up and calls still_up */
void o2quo_conn_err(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	if (test_bit(node, qs->qs_conn_bm)) {
		qs->qs_connected--;
		mlog_bug_on_msg(qs->qs_connected < 0,
				"node %u, connected %d\n",
				node, qs->qs_connected);

		clear_bit(node, qs->qs_conn_bm);
	}

	mlog(0, "node %u, %d total\n", node, qs->qs_connected);

	if (test_bit(node, qs->qs_hb_bm))
		o2quo_set_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

void o2quo_init(void)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock_init(&qs->qs_lock);
	INIT_WORK(&qs->qs_work, o2quo_make_decision);
}

void o2quo_exit(void)
{
	struct o2quo_state *qs = &o2quo_state;

	flush_work_sync(&qs->qs_work);
}
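/*
 * Worked example of the decision rule implemented above (illustrative,
 * not part of the original file). With 5 heartbeating nodes, quorum =
 * (5 + 1) / 2 = 3, so a node connected to fewer than 3 of its peers
 * fences itself. With 4 nodes, quorum = 4 / 2 = 2: fewer than 2
 * connections means fencing, and exactly 2 survives only when the
 * lowest-numbered heartbeating node is among them.
 */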
@ -1,36 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef O2CLUSTER_QUORUM_H
#define O2CLUSTER_QUORUM_H

void o2quo_init(void);
void o2quo_exit(void);

void o2quo_hb_up(u8 node);
void o2quo_hb_down(u8 node);
void o2quo_hb_still_up(u8 node);
void o2quo_conn_up(u8 node);
void o2quo_conn_err(u8 node);
void o2quo_disk_timeout(void);

#endif /* O2CLUSTER_QUORUM_H */
@ -1,82 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * sys.c
 *
 * OCFS2 cluster sysfs interface
 *
 * Copyright (C) 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation,
 * version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/fs.h>

#include "ocfs2_nodemanager.h"
#include "masklog.h"
#include "sys.h"


static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", O2NM_API_VERSION);
}
static struct kobj_attribute attr_version =
	__ATTR(interface_revision, S_IFREG | S_IRUGO, version_show, NULL);

static struct attribute *o2cb_attrs[] = {
	&attr_version.attr,
	NULL,
};

static struct attribute_group o2cb_attr_group = {
	.attrs = o2cb_attrs,
};

static struct kset *o2cb_kset;

void o2cb_sys_shutdown(void)
{
	mlog_sys_shutdown();
	kset_unregister(o2cb_kset);
}

int o2cb_sys_init(void)
{
	int ret;

	o2cb_kset = kset_create_and_add("o2cb", NULL, fs_kobj);
	if (!o2cb_kset)
		return -ENOMEM;

	ret = sysfs_create_group(&o2cb_kset->kobj, &o2cb_attr_group);
	if (ret)
		goto error;

	ret = mlog_sys_init(o2cb_kset);
	if (ret)
		goto error;
	return 0;
error:
	kset_unregister(o2cb_kset);
	return ret;
}
@ -1,33 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * sys.h
 *
 * Function prototypes for o2cb sysfs interface
 *
 * Copyright (C) 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation,
 * version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef O2CLUSTER_SYS_H
#define O2CLUSTER_SYS_H

void o2cb_sys_shutdown(void);
int o2cb_sys_init(void);

#endif /* O2CLUSTER_SYS_H */
(Diff not shown because of its large size.)
@ -1,160 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * tcp.h
 *
 * Function prototypes
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef O2CLUSTER_TCP_H
#define O2CLUSTER_TCP_H

#include <linux/socket.h>
#ifdef __KERNEL__
#include <net/sock.h>
#include <linux/tcp.h>
#else
#include <sys/socket.h>
#endif
#include <linux/inet.h>
#include <linux/in.h>

struct o2net_msg
{
	__be16 magic;
	__be16 data_len;
	__be16 msg_type;
	__be16 pad1;
	__be32 sys_status;
	__be32 status;
	__be32 key;
	__be32 msg_num;
	__u8  buf[0];
};

typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data,
				     void **ret_data);
typedef void (o2net_post_msg_handler_func)(int status, void *data,
					   void *ret_data);

#define O2NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(struct o2net_msg))

/* same as hb delay, we're waiting for another node to recognize our hb */
#define O2NET_RECONNECT_DELAY_MS_DEFAULT	2000

#define O2NET_KEEPALIVE_DELAY_MS_DEFAULT	2000
#define O2NET_IDLE_TIMEOUT_MS_DEFAULT		30000


/* TODO: figure this out.... */
static inline int o2net_link_down(int err, struct socket *sock)
{
	if (sock) {
		if (sock->sk->sk_state != TCP_ESTABLISHED &&
		    sock->sk->sk_state != TCP_CLOSE_WAIT)
			return 1;
	}

	if (err >= 0)
		return 0;
	switch (err) {
	/* ????????????????????????? */
	case -ERESTARTSYS:
	case -EBADF:
	/* When the server has died, an ICMP port unreachable
	 * message prompts ECONNREFUSED. */
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -ECONNRESET:
	case -EPIPE:
		return 1;
	}
	return 0;
}

enum {
	O2NET_DRIVER_UNINITED,
	O2NET_DRIVER_READY,
};

int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
		       u8 target_node, int *status);
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
			   size_t veclen, u8 target_node, int *status);

int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
			   o2net_msg_handler_func *func, void *data,
			   o2net_post_msg_handler_func *post_func,
			   struct list_head *unreg_list);
void o2net_unregister_handler_list(struct list_head *list);

void o2net_fill_node_map(unsigned long *map, unsigned bytes);

#ifdef CONFIG_RAMSTER
void o2net_force_data_magic(struct o2net_msg *, u16, u32);
void o2net_hb_node_up_manual(int);
struct o2net_node *o2net_nn_from_num(u8);
#endif

struct o2nm_node;
int o2net_register_hb_callbacks(void);
void o2net_unregister_hb_callbacks(void);
int o2net_start_listening(struct o2nm_node *node);
void o2net_stop_listening(struct o2nm_node *node);
void o2net_disconnect_node(struct o2nm_node *node);
int o2net_num_connected_peers(void);

int o2net_init(void);
void o2net_exit(void);

struct o2net_send_tracking;
struct o2net_sock_container;

#ifdef CONFIG_DEBUG_FS
int o2net_debugfs_init(void);
void o2net_debugfs_exit(void);
void o2net_debug_add_nst(struct o2net_send_tracking *nst);
void o2net_debug_del_nst(struct o2net_send_tracking *nst);
void o2net_debug_add_sc(struct o2net_sock_container *sc);
void o2net_debug_del_sc(struct o2net_sock_container *sc);
#else
static inline int o2net_debugfs_init(void)
{
	return 0;
}
static inline void o2net_debugfs_exit(void)
{
}
static inline void o2net_debug_add_nst(struct o2net_send_tracking *nst)
{
}
static inline void o2net_debug_del_nst(struct o2net_send_tracking *nst)
{
}
static inline void o2net_debug_add_sc(struct o2net_sock_container *sc)
{
}
static inline void o2net_debug_del_sc(struct o2net_sock_container *sc)
{
}
#endif	/* CONFIG_DEBUG_FS */

#endif /* O2CLUSTER_TCP_H */
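/*
 * Illustrative sketch, not part of the original header: how a caller
 * would register a message handler against the API declared above and
 * later tear it down with o2net_unregister_handler_list(). The message
 * type, key, handler, and list names here are hypothetical.
 */
#define EXAMPLE_MSG_TYPE	1
#define EXAMPLE_MSG_KEY		0x12345678

static LIST_HEAD(example_unreg_list);

static int example_msg_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	/* the payload starts at msg->buf; the return value is sent back
	 * to the sender as the message status */
	return 0;
}

static int example_register(void)
{
	return o2net_register_handler(EXAMPLE_MSG_TYPE, EXAMPLE_MSG_KEY,
				      O2NET_MAX_PAYLOAD_BYTES,
				      example_msg_handler, NULL, NULL,
				      &example_unreg_list);
}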
@ -1,249 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef O2CLUSTER_TCP_INTERNAL_H
#define O2CLUSTER_TCP_INTERNAL_H

#define O2NET_MSG_MAGIC           ((u16)0xfa55)
#define O2NET_MSG_STATUS_MAGIC    ((u16)0xfa56)
#define O2NET_MSG_KEEP_REQ_MAGIC  ((u16)0xfa57)
#define O2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
#ifdef CONFIG_RAMSTER
/*
 * "data magic" is a long version of "status magic" where the message
 * payload actually contains data to be passed in reply to certain messages
 */
#define O2NET_MSG_DATA_MAGIC      ((u16)0xfa59)
#endif

/* we're delaying our quorum decision so that heartbeat will have timed
 * out truly dead nodes by the time we come around to making decisions
 * on their number */
#define O2NET_QUORUM_DELAY_MS	((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)

/*
 * This version number represents quite a lot, unfortunately. It not
 * only represents the raw network message protocol on the wire but also
 * locking semantics of the file system using the protocol. It should
 * be somewhere else, I'm sure, but right now it isn't.
 *
 * With version 11, we separate out the filesystem locking portion. The
 * filesystem now has a major.minor version it negotiates. Version 11
 * introduces this negotiation to the o2dlm protocol, and as such the
 * version here in tcp_internal.h should not need to be bumped for
 * filesystem locking changes.
 *
 * New in version 11
 * 	- Negotiation of filesystem locking in the dlm join.
 *
 * New in version 10:
 * 	- Meta/data locks combined
 *
 * New in version 9:
 * 	- All votes removed
 *
 * New in version 8:
 * 	- Replace delete inode votes with a cluster lock
 *
 * New in version 7:
 * 	- DLM join domain includes the live nodemap
 *
 * New in version 6:
 * 	- DLM lockres remote refcount fixes.
 *
 * New in version 5:
 * 	- Network timeout checking protocol
 *
 * New in version 4:
 * 	- Remove i_generation from lock names for better stat performance.
 *
 * New in version 3:
 * 	- Replace dentry votes with a cluster lock
 *
 * New in version 2:
 * 	- full 64 bit i_size in the metadata lock lvbs
 * 	- introduction of "rw" lock and pushing meta/data locking down
 */
#define O2NET_PROTOCOL_VERSION 11ULL
struct o2net_handshake {
	__be64	protocol_version;
	__be64	connector_id;
	__be32  o2hb_heartbeat_timeout_ms;
	__be32  o2net_idle_timeout_ms;
	__be32  o2net_keepalive_delay_ms;
	__be32  o2net_reconnect_delay_ms;
};

struct o2net_node {
	/* this is never called from int/bh */
	spinlock_t			nn_lock;

	/* set the moment an sc is allocated and a connect is started */
	struct o2net_sock_container	*nn_sc;
	/* _valid is only set after the handshake passes and tx can happen */
	unsigned			nn_sc_valid:1;
	/* if this is set tx just returns it */
	int				nn_persistent_error;
	/* It is only set to 1 after the idle time out. */
	atomic_t			nn_timeout;

	/* threads waiting for an sc to arrive wait on the wq for generation
	 * to increase. it is increased when a connecting socket succeeds
	 * or fails or when an accepted socket is attached. */
	wait_queue_head_t		nn_sc_wq;

	struct idr			nn_status_idr;
	struct list_head		nn_status_list;

	/* connects are attempted from when heartbeat comes up until either hb
	 * goes down, the node is unconfigured, no connect attempts succeed
	 * before O2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
	 * is queued from set_nn_state both from hb up and from itself if a
	 * connect attempt fails and so can be self-arming. shutdown is
	 * careful to first mark the nn such that no connects will be attempted
	 * before canceling delayed connect work and flushing the queue. */
	struct delayed_work		nn_connect_work;
	unsigned long			nn_last_connect_attempt;

	/* this is queued as nodes come up and is canceled when a connection is
	 * established. this expiring gives up on the node and errors out
	 * transmits */
	struct delayed_work		nn_connect_expired;

	/* after we give up on a socket we wait a while before deciding
	 * that it is still heartbeating and that we should do some
	 * quorum work */
	struct delayed_work		nn_still_up;
};

struct o2net_sock_container {
	struct kref		sc_kref;
	/* the next two are valid for the life time of the sc */
	struct socket		*sc_sock;
	struct o2nm_node	*sc_node;

	/* all of these sc work structs hold refs on the sc while they are
	 * queued. they should not be able to ref a freed sc. the teardown
	 * race is with o2net_wq destruction in o2net_stop_listening() */

	/* rx and connect work are generated from socket callbacks. sc
	 * shutdown removes the callbacks and then flushes the work queue */
	struct work_struct	sc_rx_work;
	struct work_struct	sc_connect_work;
	/* shutdown work is triggered in two ways. the simple way is
	 * for a code path calls ensure_shutdown which gets a lock, removes
	 * the sc from the nn, and queues the work. in this case the
	 * work is single-shot. the work is also queued from a sock
	 * callback, though, and in this case the work will find the sc
	 * still on the nn and will call ensure_shutdown itself.. this
	 * ends up triggering the shutdown work again, though nothing
	 * will be done in that second iteration. so work queue teardown
	 * has to be careful to remove the sc from the nn before waiting
	 * on the work queue so that the shutdown work doesn't remove the
	 * sc and rearm itself.
	 */
	struct work_struct	sc_shutdown_work;

	struct timer_list	sc_idle_timeout;
	struct delayed_work	sc_keepalive_work;

	unsigned		sc_handshake_ok:1;

	struct page		*sc_page;
	size_t			sc_page_off;

	/* original handlers for the sockets */
	void			(*sc_state_change)(struct sock *sk);
	void			(*sc_data_ready)(struct sock *sk, int bytes);

	u32			sc_msg_key;
	u16			sc_msg_type;

#ifdef CONFIG_DEBUG_FS
	struct list_head	sc_net_debug_item;
	ktime_t			sc_tv_timer;
	ktime_t			sc_tv_data_ready;
	ktime_t			sc_tv_advance_start;
	ktime_t			sc_tv_advance_stop;
	ktime_t			sc_tv_func_start;
	ktime_t			sc_tv_func_stop;
#endif
#ifdef CONFIG_OCFS2_FS_STATS
	ktime_t			sc_tv_acquiry_total;
	ktime_t			sc_tv_send_total;
	ktime_t			sc_tv_status_total;
	u32			sc_send_count;
	u32			sc_recv_count;
	ktime_t			sc_tv_process_total;
#endif
	struct mutex		sc_send_lock;
};

struct o2net_msg_handler {
	struct rb_node		nh_node;
	u32			nh_max_len;
	u32			nh_msg_type;
	u32			nh_key;
	o2net_msg_handler_func	*nh_func;
	o2net_msg_handler_func	*nh_func_data;
	o2net_post_msg_handler_func
				*nh_post_func;
	struct kref		nh_kref;
	struct list_head	nh_unregister_item;
};

enum o2net_system_error {
	O2NET_ERR_NONE = 0,
	O2NET_ERR_NO_HNDLR,
	O2NET_ERR_OVERFLOW,
	O2NET_ERR_DIED,
	O2NET_ERR_MAX
};

struct o2net_status_wait {
	enum o2net_system_error	ns_sys_status;
	s32			ns_status;
	int			ns_id;
	wait_queue_head_t	ns_wq;
	struct list_head	ns_node_item;
};

#ifdef CONFIG_DEBUG_FS
/* just for state dumps */
struct o2net_send_tracking {
	struct list_head		st_net_debug_item;
	struct task_struct		*st_task;
	struct o2net_sock_container	*st_sc;
	u32				st_id;
	u32				st_msg_type;
	u32				st_msg_key;
	u8				st_node;
	ktime_t				st_sock_time;
	ktime_t				st_send_time;
	ktime_t				st_status_time;
};
#else
struct o2net_send_tracking {
	u32 dummy;
};
#endif	/* CONFIG_DEBUG_FS */

#endif /* O2CLUSTER_TCP_INTERNAL_H */
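/*
 * Illustrative sketch, not part of the original header: one way the
 * handshake fields declared above could be filled before being put on
 * the wire. The function name is hypothetical; the timeout values come
 * from the o2nm cluster declared in nodemanager.h.
 */
static inline void example_fill_handshake(struct o2net_handshake *hand,
					  struct o2nm_cluster *cl)
{
	hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
	hand->o2net_idle_timeout_ms = cpu_to_be32(cl->cl_idle_timeout_ms);
	hand->o2net_keepalive_delay_ms = cpu_to_be32(cl->cl_keepalive_delay_ms);
	hand->o2net_reconnect_delay_ms = cpu_to_be32(cl->cl_reconnect_delay_ms);
}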
@ -1,42 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * ver.c
 *
 * version string
 *
 * Copyright (C) 2002, 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>

#include "ver.h"

#define CLUSTER_BUILD_VERSION "1.5.0"

#define VERSION_STR "OCFS2 Node Manager " CLUSTER_BUILD_VERSION

void cluster_print_version(void)
{
	printk(KERN_INFO "%s\n", VERSION_STR);
}

MODULE_DESCRIPTION(VERSION_STR);

MODULE_VERSION(CLUSTER_BUILD_VERSION);
@ -1,31 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * ver.h
 *
 * Function prototypes
 *
 * Copyright (C) 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef O2CLUSTER_VER_H
#define O2CLUSTER_VER_H

void cluster_print_version(void);

#endif /* O2CLUSTER_VER_H */
@ -1,117 +0,0 @@
|
|||
/*
|
||||
* ramster.h
|
||||
*
|
||||
* Peer-to-peer transcendent memory
|
||||
*
|
||||
* Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
|
||||
*/
|
||||
|
||||
#ifndef _RAMSTER_H_
|
||||
#define _RAMSTER_H_
|
||||
|
||||
/*
|
||||
* format of remote pampd:
|
||||
* bit 0 == intransit
|
||||
 * bit 1 == is_remote... if this bit is set, then
 * bit 2-9 == remotenode
 * bit 10-22 == size
 * bit 23-30 == cksum
 */
#define FAKE_PAMPD_INTRANSIT_BITS	1
#define FAKE_PAMPD_ISREMOTE_BITS	1
#define FAKE_PAMPD_REMOTENODE_BITS	8
#define FAKE_PAMPD_REMOTESIZE_BITS	13
#define FAKE_PAMPD_CHECKSUM_BITS	8

#define FAKE_PAMPD_INTRANSIT_SHIFT	0
#define FAKE_PAMPD_ISREMOTE_SHIFT	(FAKE_PAMPD_INTRANSIT_SHIFT + \
					 FAKE_PAMPD_INTRANSIT_BITS)
#define FAKE_PAMPD_REMOTENODE_SHIFT	(FAKE_PAMPD_ISREMOTE_SHIFT + \
					 FAKE_PAMPD_ISREMOTE_BITS)
#define FAKE_PAMPD_REMOTESIZE_SHIFT	(FAKE_PAMPD_REMOTENODE_SHIFT + \
					 FAKE_PAMPD_REMOTENODE_BITS)
#define FAKE_PAMPD_CHECKSUM_SHIFT	(FAKE_PAMPD_REMOTESIZE_SHIFT + \
					 FAKE_PAMPD_REMOTESIZE_BITS)

#define FAKE_PAMPD_MASK(x)		((1UL << (x)) - 1)

static inline void *pampd_make_remote(int remotenode, size_t size,
				      unsigned char cksum)
{
	unsigned long fake_pampd = 0;
	fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
	fake_pampd |= ((unsigned long)remotenode &
			FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
			FAKE_PAMPD_REMOTENODE_SHIFT;
	fake_pampd |= ((unsigned long)size &
			FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
			FAKE_PAMPD_REMOTESIZE_SHIFT;
	fake_pampd |= ((unsigned long)cksum &
			FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
			FAKE_PAMPD_CHECKSUM_SHIFT;
	return (void *)fake_pampd;
}

static inline unsigned int pampd_remote_node(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
}

static inline unsigned int pampd_remote_size(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
}

static inline unsigned char pampd_remote_cksum(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
}

static inline bool pampd_is_remote(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
}

static inline bool pampd_is_intransit(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
}

/* note that it is a BUG for intransit to be set without isremote also set */
static inline void *pampd_mark_intransit(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;

	fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
	fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
	return (void *)fake_pampd;
}

static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
{
	unsigned long pampd = (unsigned long)marked_pampd;

	pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
	pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
	return (void *)pampd;
}

extern int ramster_remote_async_get(struct tmem_xhandle *,
				    bool, int, size_t, uint8_t, void *extra);
extern int ramster_remote_put(struct tmem_xhandle *, char *, size_t,
			      bool, int *);
extern int ramster_remote_flush(struct tmem_xhandle *, int);
extern int ramster_remote_flush_object(struct tmem_xhandle *, int);
extern int ramster_o2net_register_handlers(void);

#endif /* _TMEM_H */
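
The helpers above pack five fields into the pointer-sized integer that stands in for a pampd: an in-transit flag, a remote flag, the remote node number, the (compressed) size, and a one-byte checksum. A minimal stand-alone sketch of the same round trip, runnable in user space; it assumes a 64-bit unsigned long (wide enough for all 31 bits used) and shortens the macro names, so it is an illustration of the layout rather than driver code:

#include <assert.h>
#include <stdio.h>

#define INTRANSIT_BITS	1
#define ISREMOTE_BITS	1
#define REMOTENODE_BITS	8
#define REMOTESIZE_BITS	13
#define CHECKSUM_BITS	8

#define INTRANSIT_SHIFT	0
#define ISREMOTE_SHIFT	(INTRANSIT_SHIFT + INTRANSIT_BITS)
#define REMOTENODE_SHIFT (ISREMOTE_SHIFT + ISREMOTE_BITS)
#define REMOTESIZE_SHIFT (REMOTENODE_SHIFT + REMOTENODE_BITS)
#define CHECKSUM_SHIFT	(REMOTESIZE_SHIFT + REMOTESIZE_BITS)
#define MASK(x)		((1UL << (x)) - 1)

int main(void)
{
	int node = 1;
	unsigned long size = 4096;	/* fits in 13 bits (max 8191) */
	unsigned char cksum = 0xab;
	unsigned long pampd = 0;

	/* encode, as pampd_make_remote() does */
	pampd |= 1UL << ISREMOTE_SHIFT;
	pampd |= ((unsigned long)node & MASK(REMOTENODE_BITS)) << REMOTENODE_SHIFT;
	pampd |= (size & MASK(REMOTESIZE_BITS)) << REMOTESIZE_SHIFT;
	pampd |= ((unsigned long)cksum & MASK(CHECKSUM_BITS)) << CHECKSUM_SHIFT;

	/* decode, as the pampd_remote_*() accessors do */
	assert(((pampd >> REMOTENODE_SHIFT) & MASK(REMOTENODE_BITS)) == 1);
	assert(((pampd >> REMOTESIZE_SHIFT) & MASK(REMOTESIZE_BITS)) == 4096);
	assert(((pampd >> CHECKSUM_SHIFT) & MASK(CHECKSUM_BITS)) == 0xab);
	printf("encoded fake pampd: %#lx\n", pampd);
	return 0;
}
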
@@ -1,402 +0,0 @@
/*
 * ramster_o2net.c
 *
 * Copyright (c) 2011, Dan Magenheimer, Oracle Corp.
 *
 * Ramster_o2net provides an interface between zcache and o2net.
 *
 * FIXME: support more than two nodes
 */

#include <linux/list.h>
#include "cluster/tcp.h"
#include "cluster/nodemanager.h"
#include "tmem.h"
#include "zcache.h"
#include "ramster.h"

#define RAMSTER_TESTING

#define RMSTR_KEY	0x77347734

enum {
	RMSTR_TMEM_PUT_EPH = 100,
	RMSTR_TMEM_PUT_PERS,
	RMSTR_TMEM_ASYNC_GET_REQUEST,
	RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
	RMSTR_TMEM_ASYNC_GET_REPLY,
	RMSTR_TMEM_FLUSH,
	RMSTR_TMEM_FLOBJ,
	RMSTR_TMEM_DESTROY_POOL,
};

#define RMSTR_O2NET_MAX_LEN \
		(O2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))

#include "cluster/tcp_internal.h"

static struct o2nm_node *ramster_choose_node(int *nodenum,
					     struct tmem_xhandle *xh)
{
	struct o2nm_node *node = NULL;
	int i;

	/* FIXME reproducibly pick a node based on xh that is NOT this node */
	i = o2nm_this_node();
	i = !i;	/* FIXME ONLY FOR TWO NODES */
	node = o2nm_get_node_by_num(i);
	/* WARNING: THIS DOES NOT CHECK TO ENSURE CONNECTED */
	if (node != NULL)
		*nodenum = i;
	return node;
}

static void ramster_put_node(struct o2nm_node *node)
{
	o2nm_node_put(node);
}

/* FIXME following buffer should be per-cpu, protected by preempt_disable */
static char ramster_async_get_buf[O2NET_MAX_PAYLOAD_BYTES];

static int ramster_remote_async_get_request_handler(struct o2net_msg *msg,
				u32 len, void *data, void **ret_data)
{
	char *pdata;
	struct tmem_xhandle xh;
	int found;
	size_t size = RMSTR_O2NET_MAX_LEN;
	u16 msgtype = be16_to_cpu(msg->msg_type);
	bool get_and_free = (msgtype == RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST);
	unsigned long flags;

	xh = *(struct tmem_xhandle *)msg->buf;
	if (xh.xh_data_size > RMSTR_O2NET_MAX_LEN)
		BUG();
	pdata = ramster_async_get_buf;
	*(struct tmem_xhandle *)pdata = xh;
	pdata += sizeof(struct tmem_xhandle);
	local_irq_save(flags);
	found = zcache_get(xh.client_id, xh.pool_id, &xh.oid, xh.index,
			   pdata, &size, 1, get_and_free ? 1 : -1);
	local_irq_restore(flags);
	if (found < 0) {
		/* a zero size indicates the get failed */
		size = 0;
	}
	if (size > RMSTR_O2NET_MAX_LEN)
		BUG();
	*ret_data = pdata - sizeof(struct tmem_xhandle);
	/* now make caller (o2net_process_message) handle specially */
	o2net_force_data_magic(msg, RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY);
	return size + sizeof(struct tmem_xhandle);
}

static int ramster_remote_async_get_reply_handler(struct o2net_msg *msg,
				u32 len, void *data, void **ret_data)
{
	char *in = (char *)msg->buf;
	int datalen = len - sizeof(struct o2net_msg);
	int ret = -1;
	struct tmem_xhandle *xh = (struct tmem_xhandle *)in;

	in += sizeof(struct tmem_xhandle);
	datalen -= sizeof(struct tmem_xhandle);
	BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
	ret = zcache_localify(xh->pool_id, &xh->oid, xh->index,
			      in, datalen, xh->extra);
#ifdef RAMSTER_TESTING
	if (ret == -EEXIST)
		pr_err("TESTING ArrgREP, aborted overwrite on racy put\n");
#endif
	return ret;
}

int ramster_remote_put_handler(struct o2net_msg *msg,
			       u32 len, void *data, void **ret_data)
{
	struct tmem_xhandle *xh;
	char *p = (char *)msg->buf;
	int datalen = len - sizeof(struct o2net_msg) -
		      sizeof(struct tmem_xhandle);
	u16 msgtype = be16_to_cpu(msg->msg_type);
	bool ephemeral = (msgtype == RMSTR_TMEM_PUT_EPH);
	unsigned long flags;
	int ret;

	xh = (struct tmem_xhandle *)p;
	p += sizeof(struct tmem_xhandle);
	zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
	local_irq_save(flags);
	ret = zcache_put(xh->client_id, xh->pool_id, &xh->oid, xh->index,
			 p, datalen, 1, ephemeral ? 1 : -1);
	local_irq_restore(flags);
	return ret;
}

int ramster_remote_flush_handler(struct o2net_msg *msg,
				 u32 len, void *data, void **ret_data)
{
	struct tmem_xhandle *xh;
	char *p = (char *)msg->buf;

	xh = (struct tmem_xhandle *)p;
	p += sizeof(struct tmem_xhandle);
	(void)zcache_flush(xh->client_id, xh->pool_id, &xh->oid, xh->index);
	return 0;
}

int ramster_remote_flobj_handler(struct o2net_msg *msg,
				 u32 len, void *data, void **ret_data)
{
	struct tmem_xhandle *xh;
	char *p = (char *)msg->buf;

	xh = (struct tmem_xhandle *)p;
	p += sizeof(struct tmem_xhandle);
	(void)zcache_flush_object(xh->client_id, xh->pool_id, &xh->oid);
	return 0;
}

int ramster_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
			     size_t expect_size, uint8_t expect_cksum,
			     void *extra)
{
	int ret = -1, status;
	struct o2nm_node *node = NULL;
	struct kvec vec[1];
	size_t veclen = 1;
	u32 msg_type;

	node = o2nm_get_node_by_num(remotenode);
	if (node == NULL)
		goto out;
	xh->client_id = o2nm_this_node(); /* which node is getting */
	xh->xh_data_cksum = expect_cksum;
	xh->xh_data_size = expect_size;
	xh->extra = extra;
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;
	if (free)
		msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
	else
		msg_type = RMSTR_TMEM_ASYNC_GET_REQUEST;
	ret = o2net_send_message_vec(msg_type, RMSTR_KEY,
				     vec, veclen, remotenode, &status);
	ramster_put_node(node);
	if (ret < 0) {
		/* FIXME handle bad message possibilities here? */
		pr_err("UNTESTED ret<0 in ramster_remote_async_get\n");
	}
	ret = status;
out:
	return ret;
}

#ifdef RAMSTER_TESTING
/* leave me here to see if it catches a weird crash */
static void ramster_check_irq_counts(void)
{
	static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
	int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;

	cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
	if (cur_hardirq_cnt > last_hardirq_cnt) {
		last_hardirq_cnt = cur_hardirq_cnt;
		if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
			pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
			       last_hardirq_cnt);
	}
	cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
	if (cur_softirq_cnt > last_softirq_cnt) {
		last_softirq_cnt = cur_softirq_cnt;
		if (!(last_softirq_cnt&(last_softirq_cnt-1)))
			pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
			       last_softirq_cnt);
	}
	cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
	if (cur_preempt_cnt > last_preempt_cnt) {
		last_preempt_cnt = cur_preempt_cnt;
		if (!(last_preempt_cnt&(last_preempt_cnt-1)))
			pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
			       last_preempt_cnt);
	}
}
#endif

int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
		       bool ephemeral, int *remotenode)
{
	int nodenum, ret = -1, status;
	struct o2nm_node *node = NULL;
	struct kvec vec[2];
	size_t veclen = 2;
	u32 msg_type;
#ifdef RAMSTER_TESTING
	struct o2net_node *nn;
#endif

	BUG_ON(size > RMSTR_O2NET_MAX_LEN);
	xh->client_id = o2nm_this_node(); /* which node is putting */
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;
	vec[1].iov_len = size;
	vec[1].iov_base = data;
	node = ramster_choose_node(&nodenum, xh);
	if (!node)
		goto out;

#ifdef RAMSTER_TESTING
	nn = o2net_nn_from_num(nodenum);
	WARN_ON_ONCE(nn->nn_persistent_error || !nn->nn_sc_valid);
#endif

	if (ephemeral)
		msg_type = RMSTR_TMEM_PUT_EPH;
	else
		msg_type = RMSTR_TMEM_PUT_PERS;
#ifdef RAMSTER_TESTING
	/* leave me here to see if it catches a weird crash */
	ramster_check_irq_counts();
#endif

	ret = o2net_send_message_vec(msg_type, RMSTR_KEY,
				     vec, veclen, nodenum, &status);
#ifdef RAMSTER_TESTING
	if (ret != 0) {
		static unsigned long cnt;
		cnt++;
		if (!(cnt&(cnt-1)))
			pr_err("ramster_remote_put: message failed, "
			       "ret=%d, cnt=%lu\n", ret, cnt);
		ret = -1;
	}
#endif
	if (ret < 0)
		ret = -1;
	else {
		ret = status;
		*remotenode = nodenum;
	}

	ramster_put_node(node);
out:
	return ret;
}
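
ramster_remote_put frames every message as a fixed-size struct tmem_xhandle header immediately followed by the payload, handed to o2net as a two-element kvec so the two pieces go out back-to-back without an intermediate copy. A stand-alone sketch of the same gather-framing idea using POSIX writev(); the toy_xhandle struct is invented for illustration and is not the driver's wire format:

#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>

struct toy_xhandle {		/* stand-in for struct tmem_xhandle */
	uint16_t pool_id;
	uint16_t index;
	uint32_t data_size;
};

int main(void)
{
	char payload[] = "compressed page bytes";
	struct toy_xhandle xh = {
		.pool_id = 1, .index = 42, .data_size = sizeof(payload),
	};
	struct iovec vec[2];	/* mirrors the driver's struct kvec vec[2] */

	vec[0].iov_base = &xh;
	vec[0].iov_len = sizeof(xh);
	vec[1].iov_base = payload;
	vec[1].iov_len = sizeof(payload);
	/* one gathered write sends header and payload contiguously */
	return writev(STDOUT_FILENO, vec, 2) < 0;
}
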
int ramster_remote_flush(struct tmem_xhandle *xh, int remotenode)
{
	int ret = -1, status;
	struct o2nm_node *node = NULL;
	struct kvec vec[1];
	size_t veclen = 1;

	node = o2nm_get_node_by_num(remotenode);
	BUG_ON(node == NULL);
	xh->client_id = o2nm_this_node(); /* which node is flushing */
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;
	BUG_ON(irqs_disabled());
	BUG_ON(in_softirq());
	ret = o2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
				     vec, veclen, remotenode, &status);
	ramster_put_node(node);
	return ret;
}

int ramster_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
{
	int ret = -1, status;
	struct o2nm_node *node = NULL;
	struct kvec vec[1];
	size_t veclen = 1;

	node = o2nm_get_node_by_num(remotenode);
	BUG_ON(node == NULL);
	xh->client_id = o2nm_this_node(); /* which node is flobjing */
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;
	ret = o2net_send_message_vec(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
				     vec, veclen, remotenode, &status);
	ramster_put_node(node);
	return ret;
}

/*
 * Handler registration
 */

static LIST_HEAD(ramster_o2net_unreg_list);

static void ramster_o2net_unregister_handlers(void)
{
	o2net_unregister_handler_list(&ramster_o2net_unreg_list);
}

int ramster_o2net_register_handlers(void)
{
	int status;

	status = o2net_register_handler(RMSTR_TMEM_PUT_EPH, RMSTR_KEY,
					RMSTR_O2NET_MAX_LEN,
					ramster_remote_put_handler,
					NULL, NULL, &ramster_o2net_unreg_list);
	if (status)
		goto bail;

	status = o2net_register_handler(RMSTR_TMEM_PUT_PERS, RMSTR_KEY,
					RMSTR_O2NET_MAX_LEN,
					ramster_remote_put_handler,
					NULL, NULL, &ramster_o2net_unreg_list);
	if (status)
		goto bail;

	status = o2net_register_handler(RMSTR_TMEM_ASYNC_GET_REQUEST, RMSTR_KEY,
					RMSTR_O2NET_MAX_LEN,
					ramster_remote_async_get_request_handler,
					NULL, NULL,
					&ramster_o2net_unreg_list);
	if (status)
		goto bail;

	status = o2net_register_handler(RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
					RMSTR_KEY, RMSTR_O2NET_MAX_LEN,
					ramster_remote_async_get_request_handler,
					NULL, NULL,
					&ramster_o2net_unreg_list);
	if (status)
		goto bail;

	status = o2net_register_handler(RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY,
					RMSTR_O2NET_MAX_LEN,
					ramster_remote_async_get_reply_handler,
					NULL, NULL,
					&ramster_o2net_unreg_list);
	if (status)
		goto bail;

	status = o2net_register_handler(RMSTR_TMEM_FLUSH, RMSTR_KEY,
					RMSTR_O2NET_MAX_LEN,
					ramster_remote_flush_handler,
					NULL, NULL,
					&ramster_o2net_unreg_list);
	if (status)
		goto bail;

	status = o2net_register_handler(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
					RMSTR_O2NET_MAX_LEN,
					ramster_remote_flobj_handler,
					NULL, NULL,
					&ramster_o2net_unreg_list);
	if (status)
		goto bail;

	pr_info("ramster_o2net: handlers registered\n");

bail:
	if (status) {
		ramster_o2net_unregister_handlers();
		pr_err("ramster_o2net: couldn't register handlers\n");
	}
	return status;
}
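
The registration function above uses a common all-or-nothing pattern: each successful o2net_register_handler() appends the handler to ramster_o2net_unreg_list, and on the first failure the whole list is torn down in one call. A stand-alone sketch of the same rollback shape; the handler table and the fail_at knob are invented purely to make the pattern runnable:

#include <stdio.h>

#define NHANDLERS 7

static int registered[NHANDLERS];

static int register_one(int i, int fail_at)
{
	if (i == fail_at)
		return -1;	/* simulate a registration failure */
	registered[i] = 1;
	return 0;
}

static void unregister_all(void)
{
	/* mirrors o2net_unregister_handler_list(&ramster_o2net_unreg_list) */
	for (int i = 0; i < NHANDLERS; i++)
		registered[i] = 0;
}

static int register_handlers(int fail_at)
{
	int status = 0;

	for (int i = 0; i < NHANDLERS; i++) {
		status = register_one(i, fail_at);
		if (status)
			goto bail;
	}
	printf("all %d handlers registered\n", NHANDLERS);
bail:
	if (status) {
		unregister_all();	/* partial registration rolled back */
		printf("registration failed at %d, rolled back\n", fail_at);
	}
	return status;
}

int main(void)
{
	register_handlers(NHANDLERS);		/* success path */
	return register_handlers(3) ? 0 : 1;	/* failure path rolls back */
}
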
@@ -1,851 +0,0 @@
/*
 * In-kernel transcendent memory (generic implementation)
 *
 * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
 *
 * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
 * "handles" (triples containing a pool id, an object id, and an index) to
 * pages in a page-accessible memory (PAM).  Tmem references the PAM pages via
 * an abstract "pampd" (PAM page-descriptor), which can be operated on by a
 * set of functions (pamops).  Each pampd contains some representation of
 * PAGE_SIZE bytes worth of data.  Tmem must support potentially millions of
 * pages and must be able to insert, find, and delete these pages at a
 * potential frequency of thousands per second concurrently across many CPUs,
 * (and, if used with KVM, across many vcpus across many guests).
 * Tmem is tracked with a hierarchy of data structures, organized by
 * the elements in a handle-tuple: pool_id, object_id, and page index.
 * One or more "clients" (e.g. guests) each provide one or more tmem_pools.
 * Each pool contains a hash table of rb_trees of tmem_objs.  Each
 * tmem_obj contains a radix-tree-like tree of pointers, with intermediate
 * nodes called tmem_objnodes.  Each leaf pointer in this tree points to
 * a pampd, which is accessible only through a small set of callbacks
 * registered by the PAM implementation (see tmem_register_pamops).  Tmem
 * does all memory allocation via a set of callbacks registered by the tmem
 * host implementation (e.g. see tmem_register_hostops).
 */
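
The addressing path the comment describes is pool -> hash bucket -> object (by oid) -> page index -> pampd. A stand-alone sketch of just that path, with the rb-tree replaced by a linked list and the radix-like tree by a flat array (which is NOT how tmem scales; it only shows how the handle tuple is resolved, and error handling is elided):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS	16	/* stands in for TMEM_HASH_BUCKETS */
#define NPAGES		8

struct toy_obj {
	uint64_t oid;		/* real tmem uses a 192-bit oid */
	void *pampd[NPAGES];	/* real tmem uses a radix-like tree */
	struct toy_obj *next;
};

struct toy_pool {
	struct toy_obj *bucket[NBUCKETS];
};

static struct toy_obj *toy_obj_find(struct toy_pool *p, uint64_t oid)
{
	struct toy_obj *o = p->bucket[oid % NBUCKETS];

	for (; o != NULL; o = o->next)
		if (o->oid == oid)
			return o;
	return NULL;
}

static void toy_put(struct toy_pool *p, uint64_t oid, int index, void *pampd)
{
	struct toy_obj *o = toy_obj_find(p, oid);

	if (o == NULL) {	/* like tmem_obj_init() after a failed find */
		o = calloc(1, sizeof(*o));
		o->oid = oid;
		o->next = p->bucket[oid % NBUCKETS];
		p->bucket[oid % NBUCKETS] = o;
	}
	o->pampd[index] = pampd;
}

int main(void)
{
	struct toy_pool pool = { { NULL } };
	char page[] = "page data";

	toy_put(&pool, 12345, 3, page);
	printf("%s\n", (char *)toy_obj_find(&pool, 12345)->pampd[3]);
	return 0;
}
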
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/delay.h>

#include "tmem.h"

/* data structure sentinels used for debugging... see tmem.h */
#define POOL_SENTINEL 0x87658765
#define OBJ_SENTINEL 0x12345678
#define OBJNODE_SENTINEL 0xfedcba09

/*
 * A tmem host implementation must use this function to register callbacks
 * for memory allocation.
 */
static struct tmem_hostops tmem_hostops;

static void tmem_objnode_tree_init(void);

void tmem_register_hostops(struct tmem_hostops *m)
{
	tmem_objnode_tree_init();
	tmem_hostops = *m;
}

/*
 * A tmem host implementation must use this function to register
 * callbacks for a page-accessible memory (PAM) implementation
 */
static struct tmem_pamops tmem_pamops;

void tmem_register_pamops(struct tmem_pamops *m)
{
	tmem_pamops = *m;
}

/*
 * Oid's are potentially very sparse and tmem_objs may have an indeterminately
 * short life, being added and deleted at a relatively high frequency.
 * So an rb_tree is an ideal data structure to manage tmem_objs.  But because
 * of the potentially huge number of tmem_objs, each pool manages a hashtable
 * of rb_trees to reduce search, insert, delete, and rebalancing time.
 * Each hashbucket also has a lock to manage concurrent access.
 *
 * The following routines manage tmem_objs.  When any tmem_obj is accessed,
 * the hashbucket lock must be held.
 */

/* searches for object==oid in pool, returns locked object if found */
static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
				      struct tmem_oid *oidp)
{
	struct rb_node *rbnode;
	struct tmem_obj *obj;

	rbnode = hb->obj_rb_root.rb_node;
	while (rbnode) {
		BUG_ON(RB_EMPTY_NODE(rbnode));
		obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
		switch (tmem_oid_compare(oidp, &obj->oid)) {
		case 0: /* equal */
			goto out;
		case -1:
			rbnode = rbnode->rb_left;
			break;
		case 1:
			rbnode = rbnode->rb_right;
			break;
		}
	}
	obj = NULL;
out:
	return obj;
}

static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);

/* free an object that has no more pampds in it */
static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
{
	struct tmem_pool *pool;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pampd_count > 0);
	pool = obj->pool;
	BUG_ON(pool == NULL);
	if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
		tmem_pampd_destroy_all_in_obj(obj);
	BUG_ON(obj->objnode_tree_root != NULL);
	BUG_ON((long)obj->objnode_count != 0);
	atomic_dec(&pool->obj_count);
	BUG_ON(atomic_read(&pool->obj_count) < 0);
	INVERT_SENTINEL(obj, OBJ);
	obj->pool = NULL;
	tmem_oid_set_invalid(&obj->oid);
	rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
}

/*
 * initialize, and insert a tmem_object_root (called only if find failed)
 */
static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
			  struct tmem_pool *pool,
			  struct tmem_oid *oidp)
{
	struct rb_root *root = &hb->obj_rb_root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct tmem_obj *this;

	BUG_ON(pool == NULL);
	atomic_inc(&pool->obj_count);
	obj->objnode_tree_height = 0;
	obj->objnode_tree_root = NULL;
	obj->pool = pool;
	obj->oid = *oidp;
	obj->objnode_count = 0;
	obj->pampd_count = 0;
	(*tmem_pamops.new_obj)(obj);
	SET_SENTINEL(obj, OBJ);
	while (*new) {
		BUG_ON(RB_EMPTY_NODE(*new));
		this = rb_entry(*new, struct tmem_obj, rb_tree_node);
		parent = *new;
		switch (tmem_oid_compare(oidp, &this->oid)) {
		case 0:
			BUG(); /* already present; should never happen! */
			break;
		case -1:
			new = &(*new)->rb_left;
			break;
		case 1:
			new = &(*new)->rb_right;
			break;
		}
	}
	rb_link_node(&obj->rb_tree_node, parent, new);
	rb_insert_color(&obj->rb_tree_node, root);
}

/*
 * Tmem is managed as a set of tmem_pools with certain attributes, such as
 * "ephemeral" vs "persistent".  These attributes apply to all tmem_objs
 * and all pampds that belong to a tmem_pool.  A tmem_pool is created
 * or deleted relatively rarely (for example, when a filesystem is
 * mounted or unmounted).
 */

/* flush all data from a pool and, optionally, free it */
static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
{
	struct rb_node *rbnode;
	struct tmem_obj *obj;
	struct tmem_hashbucket *hb = &pool->hashbucket[0];
	int i;

	BUG_ON(pool == NULL);
	for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
		spin_lock(&hb->lock);
		rbnode = rb_first(&hb->obj_rb_root);
		while (rbnode != NULL) {
			obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
			rbnode = rb_next(rbnode);
			tmem_pampd_destroy_all_in_obj(obj);
			tmem_obj_free(obj, hb);
			(*tmem_hostops.obj_free)(obj, pool);
		}
		spin_unlock(&hb->lock);
	}
	if (destroy)
		list_del(&pool->pool_list);
}

/*
 * A tmem_obj contains a radix-tree-like tree in which the intermediate
 * nodes are called tmem_objnodes.  (The kernel lib/radix-tree.c implementation
 * is very specialized and tuned for specific uses and is not particularly
 * suited for use from this code, though some code from the core algorithms has
 * been reused, thus the copyright notices below).  Each tmem_objnode contains
 * a set of pointers which point to either a set of intermediate tmem_objnodes
 * or a set of pampds.
 *
 * Portions Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
 */

struct tmem_objnode_tree_path {
	struct tmem_objnode *objnode;
	int offset;
};

/* objnode height_to_maxindex translation */
static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];

static void tmem_objnode_tree_init(void)
{
	unsigned int ht, tmp;

	for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
		tmp = ht * OBJNODE_TREE_MAP_SHIFT;
		if (tmp >= OBJNODE_TREE_INDEX_BITS)
			tmem_objnode_tree_h2max[ht] = ~0UL;
		else
			tmem_objnode_tree_h2max[ht] =
			    (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
	}
}
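
tmem_objnode_tree_init precomputes, for each tree height, the largest page index reachable at that height: each level adds OBJNODE_TREE_MAP_SHIFT (6) index bits, so height h covers indices up to 2^(6h) - 1, saturating at ~0UL once 6h exceeds the index width. The same table can be computed and inspected in user space (this sketch assumes a 64-bit unsigned long):

#include <stdio.h>

#define MAP_SHIFT	6
#define INDEX_BITS	(8 * (int)sizeof(unsigned long))
#define MAX_PATH	(INDEX_BITS / MAP_SHIFT + 2)

int main(void)
{
	unsigned long h2max;
	unsigned int ht, tmp;

	for (ht = 0; ht <= MAX_PATH; ht++) {
		tmp = ht * MAP_SHIFT;
		if (tmp >= INDEX_BITS)
			h2max = ~0UL;
		else
			h2max = (~0UL >> (INDEX_BITS - tmp - 1)) >> 1;
		/* height 1 -> 63, height 2 -> 4095, ... (2^(6*ht) - 1) */
		printf("height %2u covers indices up to %lu\n", ht, h2max);
	}
	return 0;
}
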
static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
{
	struct tmem_objnode *objnode;

	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);
	objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
	if (unlikely(objnode == NULL))
		goto out;
	objnode->obj = obj;
	SET_SENTINEL(objnode, OBJNODE);
	memset(&objnode->slots, 0, sizeof(objnode->slots));
	objnode->slots_in_use = 0;
	obj->objnode_count++;
out:
	return objnode;
}

static void tmem_objnode_free(struct tmem_objnode *objnode)
{
	struct tmem_pool *pool;
	int i;

	BUG_ON(objnode == NULL);
	for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
		BUG_ON(objnode->slots[i] != NULL);
	ASSERT_SENTINEL(objnode, OBJNODE);
	INVERT_SENTINEL(objnode, OBJNODE);
	BUG_ON(objnode->obj == NULL);
	ASSERT_SENTINEL(objnode->obj, OBJ);
	pool = objnode->obj->pool;
	BUG_ON(pool == NULL);
	ASSERT_SENTINEL(pool, POOL);
	objnode->obj->objnode_count--;
	objnode->obj = NULL;
	(*tmem_hostops.objnode_free)(objnode, pool);
}

/*
 * lookup index in object and return associated pampd (or NULL if not found)
 */
static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
	unsigned int height, shift;
	struct tmem_objnode **slot = NULL;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);

	height = obj->objnode_tree_height;
	if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height])
		goto out;
	if (height == 0 && obj->objnode_tree_root) {
		slot = &obj->objnode_tree_root;
		goto out;
	}
	shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
	slot = &obj->objnode_tree_root;
	while (height > 0) {
		if (*slot == NULL)
			goto out;
		slot = (struct tmem_objnode **)
			((*slot)->slots +
			 ((index >> shift) & OBJNODE_TREE_MAP_MASK));
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	}
out:
	return slot != NULL ? (void **)slot : NULL;
}

static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
	struct tmem_objnode **slot;

	slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
	return slot != NULL ? *slot : NULL;
}

static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
				       void *new_pampd, bool no_free)
{
	struct tmem_objnode **slot;
	void *ret = NULL;

	slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
	if ((slot != NULL) && (*slot != NULL)) {
		void *old_pampd = *(void **)slot;
		*(void **)slot = new_pampd;
		if (!no_free)
			(*tmem_pamops.free)(old_pampd, obj->pool,
					    NULL, 0, false);
		ret = new_pampd;
	}
	return ret;
}

static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
				 void *pampd)
{
	int ret = 0;
	struct tmem_objnode *objnode = NULL, *newnode, *slot;
	unsigned int height, shift;
	int offset = 0;

	/* if necessary, extend the tree to be higher */
	if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
		height = obj->objnode_tree_height + 1;
		if (index > tmem_objnode_tree_h2max[height])
			while (index > tmem_objnode_tree_h2max[height])
				height++;
		if (obj->objnode_tree_root == NULL) {
			obj->objnode_tree_height = height;
			goto insert;
		}
		do {
			newnode = tmem_objnode_alloc(obj);
			if (!newnode) {
				ret = -ENOMEM;
				goto out;
			}
			newnode->slots[0] = obj->objnode_tree_root;
			newnode->slots_in_use = 1;
			obj->objnode_tree_root = newnode;
			obj->objnode_tree_height++;
		} while (height > obj->objnode_tree_height);
	}
insert:
	slot = obj->objnode_tree_root;
	height = obj->objnode_tree_height;
	shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
	while (height > 0) {
		if (slot == NULL) {
			/* add a child objnode. */
			slot = tmem_objnode_alloc(obj);
			if (!slot) {
				ret = -ENOMEM;
				goto out;
			}
			if (objnode) {
				objnode->slots[offset] = slot;
				objnode->slots_in_use++;
			} else
				obj->objnode_tree_root = slot;
		}
		/* go down a level */
		offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
		objnode = slot;
		slot = objnode->slots[offset];
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	}
	BUG_ON(slot != NULL);
	if (objnode) {
		objnode->slots_in_use++;
		objnode->slots[offset] = pampd;
	} else
		obj->objnode_tree_root = pampd;
	obj->pampd_count++;
out:
	return ret;
}

static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
{
	struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
	struct tmem_objnode_tree_path *pathp = path;
	struct tmem_objnode *slot = NULL;
	unsigned int height, shift;
	int offset;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);
	height = obj->objnode_tree_height;
	if (index > tmem_objnode_tree_h2max[height])
		goto out;
	slot = obj->objnode_tree_root;
	if (height == 0 && obj->objnode_tree_root) {
		obj->objnode_tree_root = NULL;
		goto out;
	}
	shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
	pathp->objnode = NULL;
	do {
		if (slot == NULL)
			goto out;
		pathp++;
		offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
		pathp->offset = offset;
		pathp->objnode = slot;
		slot = slot->slots[offset];
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	} while (height > 0);
	if (slot == NULL)
		goto out;
	while (pathp->objnode) {
		pathp->objnode->slots[pathp->offset] = NULL;
		pathp->objnode->slots_in_use--;
		if (pathp->objnode->slots_in_use) {
			if (pathp->objnode == obj->objnode_tree_root) {
				while (obj->objnode_tree_height > 0 &&
				       obj->objnode_tree_root->slots_in_use == 1 &&
				       obj->objnode_tree_root->slots[0]) {
					struct tmem_objnode *to_free =
						obj->objnode_tree_root;

					obj->objnode_tree_root =
						to_free->slots[0];
					obj->objnode_tree_height--;
					to_free->slots[0] = NULL;
					to_free->slots_in_use = 0;
					tmem_objnode_free(to_free);
				}
			}
			goto out;
		}
		tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
		pathp--;
	}
	obj->objnode_tree_height = 0;
	obj->objnode_tree_root = NULL;

out:
	if (slot != NULL)
		obj->pampd_count--;
	BUG_ON(obj->pampd_count < 0);
	return slot;
}

/* recursively walk the objnode_tree destroying pampds and objnodes */
static void tmem_objnode_node_destroy(struct tmem_obj *obj,
				      struct tmem_objnode *objnode,
				      unsigned int ht)
{
	int i;

	if (ht == 0)
		return;
	for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
		if (objnode->slots[i]) {
			if (ht == 1) {
				obj->pampd_count--;
				(*tmem_pamops.free)(objnode->slots[i],
						    obj->pool, NULL, 0, true);
				objnode->slots[i] = NULL;
				continue;
			}
			tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
			tmem_objnode_free(objnode->slots[i]);
			objnode->slots[i] = NULL;
		}
	}
}

static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
{
	if (obj->objnode_tree_root == NULL)
		return;
	if (obj->objnode_tree_height == 0) {
		obj->pampd_count--;
		(*tmem_pamops.free)(obj->objnode_tree_root,
				    obj->pool, NULL, 0, true);
	} else {
		tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
					  obj->objnode_tree_height);
		tmem_objnode_free(obj->objnode_tree_root);
		obj->objnode_tree_height = 0;
	}
	obj->objnode_tree_root = NULL;
	(*tmem_pamops.free_obj)(obj->pool, obj);
}

/*
 * Tmem is operated on by a set of well-defined actions:
 * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
 * (The tmem ABI allows for subpages and exchanges but these operations
 * are not included in this implementation.)
 *
 * These "tmem core" operations are implemented in the following functions.
 */

/*
 * "Put" a page, e.g. copy a page from the kernel into newly allocated
 * PAM space (if such space is available).  Tmem_put is complicated by
 * a corner case: What if a page with matching handle already exists in
 * tmem?  To guarantee coherency, one of two actions is necessary: Either
 * the data for the page must be overwritten, or the page must be
 * "flushed" so that the data is not accessible to a subsequent "get".
 * Since these "duplicate puts" are relatively rare, this implementation
 * always flushes for simplicity.
 */
int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
	     char *data, size_t size, bool raw, int ephemeral)
{
	struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
	void *pampd = NULL, *pampd_del = NULL;
	int ret = -ENOMEM;
	struct tmem_hashbucket *hb;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = objfound = tmem_obj_find(hb, oidp);
	if (obj != NULL) {
		pampd = tmem_pampd_lookup_in_obj(objfound, index);
		if (pampd != NULL) {
			/* if found, is a dup put, flush the old one */
			pampd_del = tmem_pampd_delete_from_obj(obj, index);
			BUG_ON(pampd_del != pampd);
			(*tmem_pamops.free)(pampd, pool, oidp, index, true);
			if (obj->pampd_count == 0) {
				objnew = obj;
				objfound = NULL;
			}
			pampd = NULL;
		}
	} else {
		obj = objnew = (*tmem_hostops.obj_alloc)(pool);
		if (unlikely(obj == NULL)) {
			ret = -ENOMEM;
			goto out;
		}
		tmem_obj_init(obj, hb, pool, oidp);
	}
	BUG_ON(obj == NULL);
	BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
	pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
				      obj->pool, &obj->oid, index);
	if (unlikely(pampd == NULL))
		goto free;
	ret = tmem_pampd_add_to_obj(obj, index, pampd);
	if (unlikely(ret == -ENOMEM))
		/* may have partially built objnode tree ("stump") */
		goto delete_and_free;
	goto out;

delete_and_free:
	(void)tmem_pampd_delete_from_obj(obj, index);
free:
	if (pampd)
		(*tmem_pamops.free)(pampd, pool, NULL, 0, true);
	if (objnew) {
		tmem_obj_free(objnew, hb);
		(*tmem_hostops.obj_free)(objnew, pool);
	}
out:
	spin_unlock(&hb->lock);
	return ret;
}
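
The duplicate-put policy the comment above describes is worth seeing in isolation: a second put for the same index always flushes the old copy first, so even a failed put can never leave stale data visible to a later get. A stand-alone sketch of just that policy (the flat array stands in for one tmem_obj's pampds; this is an illustration, not the driver's code path):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NPAGES 4

static char *store[NPAGES];

static int toy_put(int index, const char *data)
{
	char *copy = strdup(data);

	if (store[index] != NULL) {	/* duplicate put: flush old page */
		free(store[index]);
		store[index] = NULL;
	}
	if (copy == NULL)
		return -1;	/* failed put leaves nothing stale behind */
	store[index] = copy;
	return 0;
}

int main(void)
{
	toy_put(2, "first version");
	toy_put(2, "second version");	/* old copy flushed, not merged */
	printf("%s\n", store[2]);
	return 0;
}
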
void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
			      uint32_t index, struct tmem_obj **ret_obj,
			      void **saved_hb)
{
	struct tmem_hashbucket *hb;
	struct tmem_obj *obj = NULL;
	void *pampd = NULL;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	if (likely(obj != NULL))
		pampd = tmem_pampd_lookup_in_obj(obj, index);
	*ret_obj = obj;
	*saved_hb = (void *)hb;
	/* note, hashbucket remains locked */
	return pampd;
}

void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
			  void *pampd, void *saved_hb, bool delete)
{
	struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;

	BUG_ON(!spin_is_locked(&hb->lock));
	if (pampd != NULL) {
		BUG_ON(obj == NULL);
		(void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
	} else if (delete) {
		BUG_ON(obj == NULL);
		(void)tmem_pampd_delete_from_obj(obj, index);
	}
	spin_unlock(&hb->lock);
}

static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
			   struct tmem_pool *pool, struct tmem_oid *oidp,
			   uint32_t index, bool free, char *data)
{
	void *old_pampd = *ppampd, *new_pampd = NULL;
	bool intransit = false;
	int ret = 0;

	if (!is_ephemeral(pool))
		new_pampd = (*tmem_pamops.repatriate_preload)(
				old_pampd, pool, oidp, index, &intransit);
	if (intransit)
		ret = -EAGAIN;
	else if (new_pampd != NULL)
		*ppampd = new_pampd;
	/* must release the hb->lock else repatriate can't sleep */
	spin_unlock(&hb->lock);
	if (!intransit)
		ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
						oidp, index, free, data);
	return ret;
}

/*
 * "Get" a page, e.g. if one can be found, copy the tmem page with the
 * matching handle from PAM space to the kernel.  By tmem definition,
 * when a "get" is successful on an ephemeral page, the page is "flushed",
 * and when a "get" is successful on a persistent page, the page is retained
 * in tmem.  Note that to preserve coherency, "get" can never be skipped if
 * tmem contains the data.  That is, if a get is done with a certain handle
 * and fails, any subsequent "get" must also fail (unless of course there is
 * a "put" done with the same handle).
 */
int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
	     char *data, size_t *size, bool raw, int get_and_free)
{
	struct tmem_obj *obj;
	void *pampd;
	bool ephemeral = is_ephemeral(pool);
	int ret = -1;
	struct tmem_hashbucket *hb;
	bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
	bool lock_held = false;
	void **ppampd;

again:
	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	lock_held = true;
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	ppampd = __tmem_pampd_lookup_in_obj(obj, index);
	if (ppampd == NULL)
		goto out;
	if (tmem_pamops.is_remote(*ppampd)) {
		ret = tmem_repatriate(ppampd, hb, pool, oidp,
				      index, free, data);
		lock_held = false; /* note hb->lock has been unlocked */
		if (ret == -EAGAIN) {
			/* rare I think, but should cond_resched()??? */
			usleep_range(10, 1000);
			goto again;
		} else if (ret != 0) {
			if (ret != -ENOENT)
				pr_err("UNTESTED case in tmem_get, ret=%d\n",
				       ret);
			ret = -1;
			goto out;
		}
		goto out;
	}
	if (free)
		pampd = tmem_pampd_delete_from_obj(obj, index);
	else
		pampd = tmem_pampd_lookup_in_obj(obj, index);
	if (pampd == NULL)
		goto out;
	if (free) {
		if (obj->pampd_count == 0) {
			tmem_obj_free(obj, hb);
			(*tmem_hostops.obj_free)(obj, pool);
			obj = NULL;
		}
	}
	if (free)
		ret = (*tmem_pamops.get_data_and_free)(
				data, size, raw, pampd, pool, oidp, index);
	else
		ret = (*tmem_pamops.get_data)(
				data, size, raw, pampd, pool, oidp, index);
	if (ret < 0)
		goto out;
	ret = 0;
out:
	if (lock_held)
		spin_unlock(&hb->lock);
	return ret;
}
|
||||
* If a page in tmem matches the handle, "flush" this page from tmem such
|
||||
* that any subsequent "get" does not succeed (unless, of course, there
|
||||
* was another "put" with the same handle).
|
||||
*/
|
||||
int tmem_flush_page(struct tmem_pool *pool,
|
||||
struct tmem_oid *oidp, uint32_t index)
|
||||
{
|
||||
struct tmem_obj *obj;
|
||||
void *pampd;
|
||||
int ret = -1;
|
||||
struct tmem_hashbucket *hb;
|
||||
|
||||
hb = &pool->hashbucket[tmem_oid_hash(oidp)];
|
||||
spin_lock(&hb->lock);
|
||||
obj = tmem_obj_find(hb, oidp);
|
||||
if (obj == NULL)
|
||||
goto out;
|
||||
pampd = tmem_pampd_delete_from_obj(obj, index);
|
||||
if (pampd == NULL)
|
||||
goto out;
|
||||
(*tmem_pamops.free)(pampd, pool, oidp, index, true);
|
||||
if (obj->pampd_count == 0) {
|
||||
tmem_obj_free(obj, hb);
|
||||
(*tmem_hostops.obj_free)(obj, pool);
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
spin_unlock(&hb->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* If a page in tmem matches the handle, replace the page so that any
|
||||
* subsequent "get" gets the new page. Returns the new page if
|
||||
* there was a page to replace, else returns NULL.
|
||||
*/
|
||||
int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
|
||||
uint32_t index, void *new_pampd)
|
||||
{
|
||||
struct tmem_obj *obj;
|
||||
int ret = -1;
|
||||
struct tmem_hashbucket *hb;
|
||||
|
||||
hb = &pool->hashbucket[tmem_oid_hash(oidp)];
|
||||
spin_lock(&hb->lock);
|
||||
obj = tmem_obj_find(hb, oidp);
|
||||
if (obj == NULL)
|
||||
goto out;
|
||||
new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
|
||||
ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
|
||||
out:
|
||||
spin_unlock(&hb->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* "Flush" all pages in tmem matching this oid.
|
||||
*/
|
||||
int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
|
||||
{
|
||||
struct tmem_obj *obj;
|
||||
struct tmem_hashbucket *hb;
|
||||
int ret = -1;
|
||||
|
||||
hb = &pool->hashbucket[tmem_oid_hash(oidp)];
|
||||
spin_lock(&hb->lock);
|
||||
obj = tmem_obj_find(hb, oidp);
|
||||
if (obj == NULL)
|
||||
goto out;
|
||||
tmem_pampd_destroy_all_in_obj(obj);
|
||||
tmem_obj_free(obj, hb);
|
||||
(*tmem_hostops.obj_free)(obj, pool);
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
spin_unlock(&hb->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* "Flush" all pages (and tmem_objs) from this tmem_pool and disable
|
||||
* all subsequent access to this tmem_pool.
|
||||
*/
|
||||
int tmem_destroy_pool(struct tmem_pool *pool)
|
||||
{
|
||||
int ret = -1;
|
||||
|
||||
if (pool == NULL)
|
||||
goto out;
|
||||
tmem_pool_flush(pool, 1);
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static LIST_HEAD(tmem_global_pool_list);
|
||||
|
||||
/*
|
||||
* Create a new tmem_pool with the provided flag and return
|
||||
* a pool id provided by the tmem host implementation.
|
||||
*/
|
||||
void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
|
||||
{
|
||||
int persistent = flags & TMEM_POOL_PERSIST;
|
||||
int shared = flags & TMEM_POOL_SHARED;
|
||||
struct tmem_hashbucket *hb = &pool->hashbucket[0];
|
||||
int i;
|
||||
|
||||
for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
|
||||
hb->obj_rb_root = RB_ROOT;
|
||||
spin_lock_init(&hb->lock);
|
||||
}
|
||||
INIT_LIST_HEAD(&pool->pool_list);
|
||||
atomic_set(&pool->obj_count, 0);
|
||||
SET_SENTINEL(pool, POOL);
|
||||
list_add_tail(&pool->pool_list, &tmem_global_pool_list);
|
||||
pool->persistent = persistent;
|
||||
pool->shared = shared;
|
||||
}
@@ -1,244 +0,0 @@
/*
 * tmem.h
 *
 * Transcendent memory
 *
 * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
 */

#ifndef _TMEM_H_
#define _TMEM_H_

#include <linux/highmem.h>
#include <linux/hash.h>
#include <linux/atomic.h>

/*
 * These are pre-defined by the Xen<->Linux ABI
 */
#define TMEM_PUT_PAGE			4
#define TMEM_GET_PAGE			5
#define TMEM_FLUSH_PAGE			6
#define TMEM_FLUSH_OBJECT		7
#define TMEM_POOL_PERSIST		1
#define TMEM_POOL_SHARED		2
#define TMEM_POOL_PRECOMPRESSED		4
#define TMEM_POOL_PAGESIZE_SHIFT	4
#define TMEM_POOL_PAGESIZE_MASK		0xf
#define TMEM_POOL_RESERVED_BITS		0x00ffff00

/*
 * sentinels have proven very useful for debugging but can be removed
 * or disabled before final merge.
 */
#define SENTINELS
#ifdef SENTINELS
#define DECL_SENTINEL uint32_t sentinel;
#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
#define INVERT_SENTINEL(_x, _y) (_x->sentinel = ~_y##_SENTINEL)
#define ASSERT_SENTINEL(_x, _y) WARN_ON(_x->sentinel != _y##_SENTINEL)
#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON(_x->sentinel != ~_y##_SENTINEL)
#else
#define DECL_SENTINEL
#define SET_SENTINEL(_x, _y) do { } while (0)
#define INVERT_SENTINEL(_x, _y) do { } while (0)
#define ASSERT_SENTINEL(_x, _y) do { } while (0)
#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
#endif
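
The sentinel macros above stamp a magic value into a structure when it is initialized and invert it when it is freed, so a later access to a stale structure trips the assertion. A stand-alone sketch of the pattern; WARN_ON is faked with fprintf so the macros compile in user space:

#include <stdint.h>
#include <stdio.h>

#define WARN_ON(cond) \
	((cond) ? fprintf(stderr, "warning: %s\n", #cond) : 0)

#define POOL_SENTINEL 0x87658765
#define DECL_SENTINEL uint32_t sentinel;
#define SET_SENTINEL(_x, _y)	((_x)->sentinel = _y##_SENTINEL)
#define INVERT_SENTINEL(_x, _y)	((_x)->sentinel = ~_y##_SENTINEL)
#define ASSERT_SENTINEL(_x, _y)	WARN_ON((_x)->sentinel != _y##_SENTINEL)

struct pool {
	int id;
	DECL_SENTINEL		/* last member, as in struct tmem_pool */
};

int main(void)
{
	struct pool p = { .id = 1 };

	SET_SENTINEL(&p, POOL);		/* stamp on init */
	ASSERT_SENTINEL(&p, POOL);	/* quiet: structure still alive */
	INVERT_SENTINEL(&p, POOL);	/* stamp on free */
	ASSERT_SENTINEL(&p, POOL);	/* warns: use-after-free-style bug */
	return 0;
}
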
#define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l))

/*
 * A pool is the highest-level data structure managed by tmem and
 * usually corresponds to a large independent set of pages such as
 * a filesystem.  Each pool has an id, and certain attributes and counters.
 * It also contains a set of hash buckets, each of which contains an rbtree
 * of objects and a lock to manage concurrency within the pool.
 */

#define TMEM_HASH_BUCKET_BITS	8
#define TMEM_HASH_BUCKETS	(1<<TMEM_HASH_BUCKET_BITS)

struct tmem_hashbucket {
	struct rb_root obj_rb_root;
	spinlock_t lock;
};

struct tmem_pool {
	void *client; /* "up" for some clients, avoids table lookup */
	struct list_head pool_list;
	uint32_t pool_id;
	bool persistent;
	bool shared;
	atomic_t obj_count;
	atomic_t refcount;
	struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
	DECL_SENTINEL
};

#define is_persistent(_p)  (_p->persistent)
#define is_ephemeral(_p)   (!(_p->persistent))

/*
 * An object id ("oid") is large: 192-bits (to ensure, for example, files
 * in a modern filesystem can be uniquely identified).
 */

struct tmem_oid {
	uint64_t oid[3];
};

struct tmem_xhandle {
	uint8_t client_id;
	uint8_t xh_data_cksum;
	uint16_t xh_data_size;
	uint16_t pool_id;
	struct tmem_oid oid;
	uint32_t index;
	void *extra;
};

static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
						    struct tmem_pool *pool,
						    struct tmem_oid *oidp,
						    uint32_t index)
{
	struct tmem_xhandle xh;
	xh.client_id = client_id;
	xh.xh_data_cksum = (uint8_t)-1;
	xh.xh_data_size = (uint16_t)-1;
	xh.pool_id = pool->pool_id;
	xh.oid = *oidp;
	xh.index = index;
	return xh;
}

static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
{
	oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
}

static inline bool tmem_oid_valid(struct tmem_oid *oidp)
{
	return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
		oidp->oid[2] != -1UL;
}

static inline int tmem_oid_compare(struct tmem_oid *left,
				   struct tmem_oid *right)
{
	int ret;

	if (left->oid[2] == right->oid[2]) {
		if (left->oid[1] == right->oid[1]) {
			if (left->oid[0] == right->oid[0])
				ret = 0;
			else if (left->oid[0] < right->oid[0])
				ret = -1;
			else
				ret = 1;
		} else if (left->oid[1] < right->oid[1])
			ret = -1;
		else
			ret = 1;
	} else if (left->oid[2] < right->oid[2])
		ret = -1;
	else
		ret = 1;
	return ret;
}

static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
{
	return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
			 TMEM_HASH_BUCKET_BITS);
}
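
tmem_oid_hash folds the three 64-bit words of the oid together with XOR and then maps the result into one of 256 buckets via hash_long(). A stand-alone sketch; the kernel's hash_long is approximated here with a 64-bit golden-ratio multiplicative hash (the constant is one published kernel variant, so treat it as an assumption rather than the exact function used by this code):

#include <stdint.h>
#include <stdio.h>

#define TMEM_HASH_BUCKET_BITS	8

static unsigned hash_long(uint64_t val, unsigned bits)
{
	/* 64-bit golden-ratio multiplicative hash, as in linux/hash.h */
	return (unsigned)((val * 0x61C8864680B583EBULL) >> (64 - bits));
}

struct tmem_oid {
	uint64_t oid[3];
};

static unsigned tmem_oid_hash(struct tmem_oid *oidp)
{
	return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
			 TMEM_HASH_BUCKET_BITS);
}

int main(void)
{
	struct tmem_oid oid = { { 0x1234, 0x5678, 0x9abc } };

	printf("bucket = %u of %u\n", tmem_oid_hash(&oid),
	       1u << TMEM_HASH_BUCKET_BITS);
	return 0;
}
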
/*
 * A tmem_obj contains an identifier (oid), pointers to the parent
 * pool and the rb_tree to which it belongs, counters, and an ordered
 * set of pampds, structured in a radix-tree-like tree.  The intermediate
 * nodes of the tree are called tmem_objnodes.
 */

struct tmem_objnode;

struct tmem_obj {
	struct tmem_oid oid;
	struct tmem_pool *pool;
	struct rb_node rb_tree_node;
	struct tmem_objnode *objnode_tree_root;
	unsigned int objnode_tree_height;
	unsigned long objnode_count;
	long pampd_count;
	/* for current design of ramster, all pages belonging to
	 * an object reside on the same remotenode and extra is
	 * used to record the number of the remotenode so a
	 * flush-object operation can specify it */
	void *extra; /* for use by pampd implementation */
	DECL_SENTINEL
};

#define OBJNODE_TREE_MAP_SHIFT 6
#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define OBJNODE_TREE_MAX_PATH \
		(OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)

struct tmem_objnode {
	struct tmem_obj *obj;
	DECL_SENTINEL
	void *slots[OBJNODE_TREE_MAP_SIZE];
	unsigned int slots_in_use;
};

/* pampd abstract datatype methods provided by the PAM implementation */
struct tmem_pamops {
	void *(*create)(char *, size_t, bool, int,
			struct tmem_pool *, struct tmem_oid *, uint32_t);
	int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
			struct tmem_oid *, uint32_t);
	int (*get_data_and_free)(char *, size_t *, bool, void *,
				 struct tmem_pool *, struct tmem_oid *,
				 uint32_t);
	void (*free)(void *, struct tmem_pool *,
		     struct tmem_oid *, uint32_t, bool);
	void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
	bool (*is_remote)(void *);
	void *(*repatriate_preload)(void *, struct tmem_pool *,
				    struct tmem_oid *, uint32_t, bool *);
	int (*repatriate)(void *, void *, struct tmem_pool *,
			  struct tmem_oid *, uint32_t, bool, void *);
	void (*new_obj)(struct tmem_obj *);
	int (*replace_in_obj)(void *, struct tmem_obj *);
};
extern void tmem_register_pamops(struct tmem_pamops *m);

/* memory allocation methods provided by the host implementation */
struct tmem_hostops {
	struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
	void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
	struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
	void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
};
extern void tmem_register_hostops(struct tmem_hostops *m);

/* core tmem accessor functions */
extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
		    char *, size_t, bool, int);
extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
		    char *, size_t *, bool, int);
extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
			void *);
extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
				     uint32_t index, struct tmem_obj **,
				     void **);
extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
				 void *, void *, bool);
extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
			   uint32_t index);
extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
extern int tmem_destroy_pool(struct tmem_pool *);
extern void tmem_new_pool(struct tmem_pool *, uint32_t);
#endif /* _TMEM_H */
File diff not shown because it is too large
@@ -1,22 +0,0 @@
/*
 * zcache.h
 *
 * External zcache functions
 *
 * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
 */

#ifndef _ZCACHE_H_
#define _ZCACHE_H_

extern int zcache_put(int, int, struct tmem_oid *, uint32_t,
		      char *, size_t, bool, int);
extern int zcache_autocreate_pool(int, int, bool);
extern int zcache_get(int, int, struct tmem_oid *, uint32_t,
		      char *, size_t *, bool, int);
extern int zcache_flush(int, int, struct tmem_oid *, uint32_t);
extern int zcache_flush_object(int, int, struct tmem_oid *);
extern int zcache_localify(int, struct tmem_oid *, uint32_t,
			   char *, size_t, void *);

#endif /* _ZCACHE_H */