This commit is contained in:
Nofel Yaseen 2020-10-04 14:24:53 -04:00
Родитель ce12eff6e4
Коммит 4151de2f7e
22 изменённых файлов: 555 добавлений и 263 удалений

52
C++Verifier/README.md Normal file
Просмотреть файл

@ -0,0 +1,52 @@
# Local verifier
## main.cpp
Arguments for running the local verifier
| Argument | options | |
|-------------------------|:---------------------:|:------------------------------------------------------------------------|
| -h, --help | | show this help message and exit |
| --configDir | CONFIG_DIR | Set configDir |
| --configFile | CONFIG_FILE | config file (default: ../out/packetformat.json) |
| --inputPath | INPUT_FILE | Input file needed for LOCAL (default: ../datafiles/38-38.0.001000.csv) |
| --filesOrKafka | OUTPUT_TYPE | Write to file or kafka (default: file) |
| --KafkaAddress | BROKER_ADDRESS | IP:Port,... for kafka broker (default: localhost:9092) |
| --channel_per_invariant | CHANNEL_PER_INVARIANT | Channel Per Invariant (default: 10) |
| --prefix | OUTPUT_FOLDER_PATH | Set Path if the output is folder (default: ../out/globalInput/) |
| --inputType | INPUT_TYPE | Input is file or socket |
| --local_socket_ip | LOCAL_SOCKET_IP | IP for reading from a local socket (default: localhost) |
| --local_socket_port | LOCAL_SOCKET_PORT | Port for reading from a local socket (default: 10001) |
The program creates state machines according to the invariants provided and processes them.
The input packets can come either from a file or from a socket. If the packets come from a file, they are assumed to be delimited by semicolons and are handled by a specific parser. This needs to be changed in the future. Ideally, the delimiter should be an input argument so that we are able to use the generic parser.
After receiving the packet, the packet is parsed using the `utils/PacketParser.cpp` class. The packet is passed to the `main_process` function. It is responsible for calling (or creating) the appropriate SFA according to the invariant violation specification, and then outputting the packet (if not suppressed) to file or Kafka.
## SFAProcessor.cpp
Takes the invariant (SFA + transformation of the packets) and a key for the constructor. The function `processPacket` makes the transitions in the SFA and returns whether to suppress the packet or not. The packet is the function argument.
## utils/PacketParser.cpp
Takes the `packetformat.json` for the constructor. It uses the `rapidjson` library to parse the JSON file. Creates the parse tree with the help of `utils/ParserHelper.cpp`. The parse tree consists of field nodes and conditional nodes. Fields are simple parsing nodes, and conditional nodes decide which branch of the tree to take.
The `parsePacket` function takes the raw data as input and returns the Packet.
## utils/Packet.h
Class to store a packet. It contains functions to set and get fields in the packet, and also a function to convert it into string format.
## DSFA/Invariant.cpp
Takes the invariant name as input to create the invariant. A single invariant can contain multiple files. To parse each file, it uses `DSFA/SFAParser.cpp` to create the appropriate SFA.
## DSFA/SFAParser.cpp
Contains two functions, `parseGlobalStateMachine` and `parseLocalStateMachine`, to parse state machines. It uses `antlr` to build the parsing tree for the SFA and passes it to `expressions/ExprBuilder.h`.
## expressions/ExprBuilder.h
Contains the functions to create the SFA from the parsed tree. The parsed tree contains many kinds of nodes (binary/boolean operators). This file goes through them to create the SFA.

Просмотреть файл

@ -13,20 +13,16 @@ class SFAProcessor {
vector<string> constraintBuffer;
bool processElementSingle(shared_ptr<Packet> & p, int dsfaNum) {
// cout << "In single" << endl;
// Using raw pointer because don't want to remove the dsfa.
DSFA* dsfa = (&(inv -> dsfas[dsfaNum]));
int currentState;
try {
// cout << "Getting dsfaNum" << dsfaNum << endl;
currentState = currentStateMap.at(dsfaNum);
}
catch(const std::out_of_range& oor) {
// cout << "Excpetion caught" << endl;
currentState = dsfa -> startState;
}
// cout << "going in advanceAndCheckSuppress" << endl;
pair<int, bool> res = dsfa -> advanceAndCheckSuppress(p, currentState);
currentStateMap[dsfaNum] = res.first;
@ -34,7 +30,6 @@ class SFAProcessor {
}
bool processElementTree(shared_ptr<Packet> & p, int dsfaNum) {
// cout << "In tree" << endl;
// Using raw pointer because don't want to remove the dsfa.
DSFA* dsfa = (&(inv -> dsfas[dsfaNum]));
// If this is a new run, make a fresh tree
@ -56,7 +51,6 @@ class SFAProcessor {
}
}
// cout << "going in advanceConstraintTreeAndCheckSuppress" << endl;
bool suppressible = dsfa -> advanceConstraintTreeAndCheckSuppress(p, constraintBuffer, constraintTreeList);
constraintTreeListMap[dsfaNum] = constraintTreeList;
@ -70,30 +64,23 @@ class SFAProcessor {
};
bool processPacket(shared_ptr<Packet> & p) {
// cout << "in processPacket" << endl;
if (inv -> filter != nullptr && (!simplifyEvaluate(inv -> filter,p))) {
// cout << "Packet Filtered" << endl;
return false;
}
// cout << "Packet allowed to process" << endl;
bool suppressible = true;
if (inv -> dsfas.size() == 0) {
// cout << "No dsfa so cannot suppress" << endl;
suppressible = false;
}
for (int dsfaNum = 0; dsfaNum < inv -> dsfas.size(); dsfaNum++) {
// Using raw pointer because don't want to remove the dsfa.
DSFA* dsfa = (&(inv -> dsfas[dsfaNum]));
// cout << "Next step" << endl;
if (dsfa -> hasVariables()) {
suppressible &= processElementTree(p, dsfaNum);
} else {
suppressible &= processElementSingle(p, dsfaNum);
}
}
// cout << "After processing. Should I suppress?: " << suppressible << endl;
suppressible &= simplifyEvaluate(inv -> negatedLocExpr,p);
// cout << "After Negative filter. Should I suppress?: " << suppressible << endl;
if (!suppressible) {
return true;
} else {

Просмотреть файл

@ -44,81 +44,35 @@ namespace ExprBuilder {
shared_ptr<BoolExpr> ret = nullptr;
if (isValid(ctx -> boolOp())) {
// cout << "in boolop" << endl;
// ParOpen boolOp boolExpression+ ParClose
// cout << "Number of boolExpression: " << ctx ->boolExpression().size() << endl;
// shared_ptr<SimpleSMTLIBParser::BoolExpressionContext> firstBool();
ret = buildBoolExpr(ctx -> boolExpression(0), bindings, locationReferences,
variableComparisons);
// cout << "boolop created" << endl;
// cout << "checking text for: " << exprRaw << endl;
if (ctx -> boolOp() -> getText() == "and") {
// cout << "creating and of size: " << endl;
// if (ctx == nullptr) {
// cout << "ctx is nullptr" << endl;
// }
// else if ((ctx -> boolExpression()).empty()) {
// cout << "boolExpression is empty" << endl;
// }
// else if ((ctx -> boolExpression(0)) == nullptr) {
// cout << "0 is nullptr" << endl;
// } else if ((ctx -> boolExpression(1)) == nullptr) {
// cout << "1 is nullptr" << endl;
// }
vector<SimpleSMTLIBParser::BoolExpressionContext *> bringingOutVector = ctx -> boolExpression();
// cout << "vector is out" << endl;
// cout << bringingOutVector.size() << endl;
for (int i = 1; i < bringingOutVector.size(); ++i) {
// cout << "Creating ith object pointer:" << i << endl;
// shared_ptr<SimpleSMTLIBParser::BoolExpressionContext> ithObj();
// cout << "going to buildBoolExpr for ith object" << endl;
// cout << ithObj -> getText() << endl;
std::shared_ptr<BoolExpr> right = buildBoolExpr(ctx -> boolExpression(i), bindings,
locationReferences, variableComparisons);
// cout << "going in and constructor" << endl;
// cout << "creating AndExpr: " << endl;
// cout << "with left: " << ret -> toString() << endl;
// cout << "and right: " << right -> toString() << endl;
ret = shared_ptr<AndExpr>(new AndExpr(ret, right));
// cout << "and created" << endl;
}
// cout << "out of and loop" << endl;
} else if (ctx -> boolOp() -> getText() == "or") {
// cout << "creating or" << endl;
for (int i = 1; i < (ctx -> boolExpression()).size(); ++i) {
std::shared_ptr<BoolExpr> right = buildBoolExpr(ctx -> boolExpression(i), bindings,
locationReferences, variableComparisons);
// cout << "creating OrExpr: " << endl;
// cout << "with left: " << ret -> toString() << endl;
// cout << "and right: " << right -> toString() << endl;
ret = shared_ptr<OrExpr>(new OrExpr(ret, right));
}
// cout << "or created" << endl;
} else if (ctx -> boolOp() -> getText() == "not") {
// cout << "creating not" << endl;
assert (ctx -> boolExpression().size() == 1);
ret -> negated = true;
}
} else if (isValid(ctx -> compOp())) {
// cout << "in compop with size: " << ctx->arithExpression().size() << endl;
// shared_ptr<SimpleSMTLIBParser::ArithExpressionContext> arithPtr(());
shared_ptr<Expr> left = buildArithExpr(ctx -> arithExpression(0));
// cout << "left created" << endl;
// arithPtr = shared_ptr<SimpleSMTLIBParser::ArithExpressionContext>(ctx -> arithExpression(1));
// cout << "going in right now" << endl;
shared_ptr<Expr> right = buildArithExpr(ctx -> arithExpression(1));
// cout << "left and right created" << endl;
// cout << (ctx -> compOp() -> getText()) << endl;
if (ctx -> compOp() -> getText() == "=") {
// cout << "creating EqExpr: " << endl;
// cout << "with left: " << left -> toString() << endl;
// cout << "and right: " << right -> toString() << endl;
ret = shared_ptr<EqExpr>(new EqExpr(left, right, locationReferences, variableComparisons));
// cout << "EqExpr created" << endl;
} else if (ctx -> compOp() -> getText() == "<") {
ret = shared_ptr<LtExpr>(new LtExpr(left, right));
} else if (ctx -> compOp() -> getText() == "<=") {
@ -128,9 +82,7 @@ namespace ExprBuilder {
} else if (ctx -> compOp() -> getText() == ">=") {
ret = shared_ptr<GeExpr>(new GeExpr(left, right));
}
// cout << "compop created " << endl;
} else if (isValid(ctx -> GRW_Let())) {
// cout << "in let" << endl;
shared_ptr<unordered_map<string, shared_ptr<BoolExpr>>> newBindings(new unordered_map<string, shared_ptr<BoolExpr>>(*bindings));
for (SimpleSMTLIBParser::VarBindingContext* vb: ctx -> varBinding()) {
buildVarBinding(vb, bindings, newBindings, locationReferences, variableComparisons);
@ -139,22 +91,14 @@ namespace ExprBuilder {
ret = buildBoolExpr(ctx -> boolExpression(0), newBindings, locationReferences, variableComparisons);
} else if (isValid(ctx -> boundVar())) {
// cout << "in boundVar" << endl;
ret = bindings -> at(ctx -> boundVar() -> getText());
// ret = move(test);
// ret = bindings -> at(test);
} else if (isValid(ctx -> PS_True())) {
// // PS_True
// cout << "true" << endl;
ret = shared_ptr<TrueExpr>(new TrueExpr());
} else if (isValid(ctx -> PS_False())) {
// // PS_False
// cout << "false" << endl;
// PS_False
ret = shared_ptr<TrueExpr>(new TrueExpr());
ret -> toggleNegated();
}
// cout << "buildBoolExpr complete: " << exprRaw << endl;
// cout << "returning" << endl;
return ret;
}
@ -180,13 +124,11 @@ namespace ExprBuilder {
ret = std::shared_ptr<IteExpr>( new IteExpr(condition, left, right) );
} else if (isValid(ctx -> arithOp())) {
// cout << "in arithop " << endl;
if (ctx -> arithExpression().size() == 1) {
assert(ctx -> arithOp() -> getText() == "-");
shared_ptr<Expr> only = buildArithExpr(ctx -> arithExpression(0));
ret = shared_ptr<NegativeContext>(new NegativeContext(only));
} else if (ctx -> arithExpression().size() == 2) {
// cout << "in in muti arith " << endl;
ret = buildArithExpr(ctx -> arithExpression(0));
for (int i = 1; i < ctx -> arithExpression().size(); ++i) {
shared_ptr<Expr> right = buildArithExpr(ctx -> arithExpression(i));
@ -215,7 +157,6 @@ namespace ExprBuilder {
throw std::runtime_error("Unknown arithExpression syntax");
}
} else if (isValid(ctx -> terminal())) {
// cout << "in terminal " << endl;
if (isValid(ctx -> terminal() -> UndefinedSymbol())) {
ret = shared_ptr<StringExpr>(new StringExpr(ctx -> terminal() -> getText()));
} else {
@ -224,7 +165,6 @@ namespace ExprBuilder {
} else {
throw std::runtime_error("Unknown arithExpression syntax");
}
// cout << "buildArithExpr complete" << endl;
return ret;
}

Просмотреть файл

@ -27,13 +27,8 @@ void Packet::FirewallParser(string line) {
int index = 0;
setTime(tokens[index++]);
fieldMap["event_type"] = tokens[index++];
fieldMap["proto"] = tokens[index++];
fieldMap["transport_protocol"] = tokens[index++];
if (tokens[index].length() > 0) {
fieldMap["flow_id"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["flow_state"] = tokens[index++];
} else {
@ -42,24 +37,6 @@ void Packet::FirewallParser(string line) {
fieldMap["srcIp"] = tokens[index++];
fieldMap["dstIp"] = tokens[index++];
if (tokens[index].length() > 0) {
fieldMap["type"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["code"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["id"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["srcL4Port"] = tokens[index++];
} else {
@ -71,51 +48,6 @@ void Packet::FirewallParser(string line) {
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["replied"] = tokens[index++];;
} else {
index++;
}
fieldMap["reverse_srcIp"] = tokens[index++];
fieldMap["reverse_dstIp"] = tokens[index++];
if (tokens[index].length() > 0) {
fieldMap["reverse_type"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["reverse_code"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["reverse_id"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["reverse_srcL4Port"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["reverse_dstL4Port"] = tokens[index++];
} else {
index++;
}
if (tokens[index].length() > 0) {
fieldMap["assured"] = tokens[index++];
} else {
index++;
}
setLocation(tokens[index++]);
}

Просмотреть файл

@ -209,9 +209,9 @@ inline string convertByteToString(string field, unsigned char data[], int start,
return to_string((uint16_t) ((data[start+1] << 8) | data[start]));
} else if (end - start == 4 && (field.find("Ip") != std::string::npos)) {
return to_string((uint8_t) data[start]) + "." + to_string((uint8_t) data[start+1]) + "." + to_string((uint8_t) data[start+2]) + "." + to_string((uint8_t) data[start+3]);
} else if (end - start == 4 && (field.find("time") != std::string::npos)) {
float f;
unsigned char b[] = {data[start], data[start+1], data[start+2], data[start+3]};
} else if (end - start == 8 && (field.find("time") != std::string::npos)) {
double f;
unsigned char b[] = {data[start], data[start+1], data[start+2], data[start+3], data[start+4], data[start+5], data[start+6], data[start+7]};
memcpy(&f, b, sizeof(f));
return to_string(f);
} else {

Просмотреть файл

@ -19,7 +19,7 @@
## Setup
First step is to set up topology at cloudlab. If you do not access Cloudlab. Please reach out to me (Nofel Yaseen). I will add your ssh key to the required machines.
The first step is to set up the topology at Cloudlab. If you do not have access to Cloudlab, please reach out to me (Nofel Yaseen) and I will add your SSH key to the required machines. The firewall experiment information can be found at `firewall/README.md`.
### How to set up cloud lab machines
First click on experiments and then create experiment profile.
@ -36,12 +36,10 @@ Wait for the experiment to get ready.
We have already compiled invariants for firewall and put it `out/` folder.
Invariant compilation was tested on MacOS. For compiling invariants by yourself, please follow these:
Invariant compilation was tested on MacOS. It requires JDK and maven to run. For compiling invariants by yourself, please follow these:
```
git clone --recurse-submodules https://github.com/microsoft/aaragog.git
cd aaragog
./install.sh
cd generateSFA
cd aaragog/generateSFA
./install_z3.sh
mvn clean package
```
@ -54,20 +52,24 @@ mvn exec:java -Dexec.args="--packet_format ../out/packetformat.json --invar_file
### Verifier Setup
Assuming Cloudlab is up and running with the SSH key on all the servers, please paste the server list into Setup/servers.txt.
It can be executed on both macOS and Linux. The default bash on macOS is still bash v3; the script needs bash `>= 4`. Please follow the instructions [here](https://www.shell-tips.com/mac/upgrade-bash/)
```
cd Setup
./setup.sh
```
For experiment details, please see firewall/README.md
It installs the required software and sets up Apache Flink, Kafka and install the firewall rules accordingly.
## Run
It can be executed on both macOS and Linux. The default bash on macOS is still bash v3; the script needs bash `>= 4`. Please follow the instructions [here](https://www.shell-tips.com/mac/upgrade-bash/)
To run the firewall experiment:
```
cd runme
./runme.sh
```
### Authors
This code was mainly written by:

Двоичные данные
Setup/Create_Profile.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 93 KiB

150
Setup/cloudSetup.md Normal file
Просмотреть файл

@ -0,0 +1,150 @@
# Run the verifier for your cloud
Below are the instructions to set up all parts of running the verifier in your own cloud/middlebox:
1. Middlebox config file: Similar to `aaragog/config/firewall/packetformat.json`, define the packet format of the packets exported by the middlebox. Please refer to the section 5.1 for details.
1. Invariant Violation (IV) Specifications: Similar to `aaragog/config/firewall/*.invar`, define the IV specifications for the middlebox. Please refer to the section 5.2 for details.
1. Compile the invariants using generateSFA. Example compilation:
```
cd aaragog/generateSFA
mvn exec:java -Dexec.args="--packet_format ../out/packetformat.json --invar_file ../config/firewall/new_established.invar"
```
Arguments in generating the SFA.
| Argument | options | |
|-----------------|:-------------:|:---------------------------------------------------------------------|
| -h, --help | | show this help message and exit |
| --packet_format | PACKET_FORMAT | Packet format config file (default: ../config/SLB/packetformat.json) |
| --invar_file | INVAR_FILE | Input .invar file (default: ../config/SLB/primary_single.invar) |
| --debug | | Packet format config file (default: false) |
| --out_dir | OUT_DIR | Output directory (default: ../out/) |
| --examples | | Run examples only (default: false) |
4. Compile the local verifier at (or close to) the middlebox location. This needs to be done for each middlebox location.
```
git clone --recurse-submodules https://github.com/microsoft/aaragog.git
cd $HOME/aaragog/C++Verifier/
chmod u+x setup.sh
./setup.sh
make
```
5. Set up the Zookeeper and Apache Kafka cluster. All local verifiers should be able to communicate with the Kafka cluster. It is fine to co-locate Zookeeper and Kafka on the same machine. At each Zookeeper and Kafka machine:
```
cd
wget https://ftp.wayne.edu/apache/kafka/2.5.0/kafka_2.12-2.5.0.tgz
tar -xzf kafka_2.12-2.5.0.tgz
```
Set up Zookeeper configuration file: `kafka_*/config/zookeeper.properties`. Example configuration:
```
tickTime=2000
dataDir=/data/zookeeper
clientPort=2181
maxClientCnxns=60
initLimit=10
syncLimit=5
server.1=your_zookeeper_node_1:2888:3888
server.2=your_zookeeper_node_2:2888:3888
server.3=your_zookeeper_node_3:2888:3888
```
On each Zookeeper node, in the `data/zookeeper` folder, run `echo <ID> > myid`, replacing `<ID>` with the node ID. Zookeeper needs an odd number of nodes.
Set the listener IP, zookeeper IPs and replication factor in Kafka configuration file `kafka_*/config/server.properties`. Please keep the replication factor of at least 3 in production. The configuration file is easy to follow.
Once the configuration is done, start the zookeeper and kafka cluster on each machine.
```
cd kafka_*/
./bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
./bin/kafka-server-start.sh -daemon config/server.properties
```
In case any zookeeper or kafka broker goes down, you can start it again with the same command. It will automatically sync with other nodes to get the latest data.
6. Set up and start Apache Flink at the global verifier. Have at least 3 nodes for the global verifier for fault tolerance and scalability. Flink has a Master-Worker model.
```
wget http://mirror.cc.columbia.edu/pub/software/apache/flink/flink-1.11.2/flink-1.11.2-bin-scala_2.11.tgz
tar -xzf flink-1.11.2-bin-scala_2.11.tgz
```
Write the IP of masters and worker in `flink*/conf/masters` and `flink*/conf/workers` at the flink job manager node.
Update `jobmanager.rpc.address` in `flink*/conf/flink-conf.yaml` with the job manager address at all the Flink nodes.
Make sure job manager node has ssh access to all the workers.
Finally, start Flink from the job manager:
```
cd flink*
./bin/start-cluster.sh
```
7. Compile and start the global verifier from the Flink job manager node.
Compilation
```
git clone --recurse-submodules https://github.com/microsoft/aaragog.git
cd aaragog/verification
mvn clean package
```
Place the compiled invariants and `packetformat.json` in config folder. Default is `out/`.
Running the global verifier.
Example Run:
```
cd aaragog
$HOME/flink-*/bin/flink run verification/target/verification-0.1.jar --config_dir out/ --out_path out/ --mode GLOBAL_KAFKA --broker_address 10.10.1.10:9092 --channel_per_invariant 10
```
Arguments for running the global verifier
| Argument | options | |
|-------------------------|:---------------------:|:---------------------------------------------------------------------------------------------------------------------------|
| -h, --help | | show this help message and exit |
| --config_dir | CONFIG_DIR | config directory (default: out/) |
| --out_path | OUT_PATH | Output file (default: ../out/) |
| --mode | MODE | Mode: LOCAL_FILE, LOCAL_SOCKET, LOCAL_FOLDER, GLOBAL_KAFKA, STANDALONE_FILE, STANDALONE_FOLDER (default: STANDALONE_FILE) |
| --input_file | INPUT_FILE | Input file needed for LOCAL or STANDALONE (default: ../notebook/38-38.0.001000.csv) |
| --input_folder | INPUT_FOLDER | Input folder needed for global (default: out/globalInput/) |
| --broker_address | BROKER_ADDRESS | IP:Port,... for kafka broker (default: localhost:9092) |
| --local_socket_ip | LOCAL_SOCKET_IP | IP for reading from a local socket (default: localhost) |
| --local_socket_port | LOCAL_SOCKET_PORT | Port for reading from a local socket (default: 10000) |
| --parser | PARSER | Select parser from generic or firewall (default: firewall) |
| --channel_per_invariant | CHANNEL_PER_INVARIANT | Channel Per Invariant (default: 10) |
The global verifier is now running.
8. Please follow the instructions to run PTP [here](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/ch-configuring_ptp_using_ptp4l). PTP needs to be running at all the local verifiers and the global verifier.
9. Local verifier was compiled in step 4. Now, we need to just run it. Place the compiled invariants and `packetformat.json` in config folder. Default is `out/`.
Running the local verifier.
Example Run:
```
cd $HOME/aaragog/C++Verifier
./build/main.out --filesOrKafka kafka --KafkaAddress 10.10.1.10:9092 --numberOfChannels 10 --inputType socket &
```
Arguments for running the local verifier
| Argument | options | |
|-------------------------|:---------------------:|:------------------------------------------------------------------------|
| -h, --help | | show this help message and exit |
| --configDir | CONFIG_DIR | Set configDir |
| --configFile | CONFIG_FILE | config file (default: ../out/packetformat.json) |
| --inputPath | INPUT_FILE | Input file needed for LOCAL (default: ../datafiles/38-38.0.001000.csv) |
| --filesOrKafka | OUTPUT_TYPE | Write to file or kafka (default: file) |
| --KafkaAddress | BROKER_ADDRESS | IP:Port,... for kafka broker (default: localhost:9092) |
| --channel_per_invariant | CHANNEL_PER_INVARIANT | Channel Per Invariant (default: 10) |
| --prefix | OUTPUT_FOLDER_PATH | Set Path if the output is folder (default: ../out/globalInput/) |
| --inputType | INPUT_TYPE | Input is file or socket |
| --local_socket_ip | LOCAL_SOCKET_IP | IP for reading from a local socket (default: localhost) |
| --local_socket_port | LOCAL_SOCKET_PORT | Port for reading from a local socket (default: 10001) |

Просмотреть файл

@ -5,11 +5,15 @@ if [ "$#" -ne 2 ]; then
exit 1
fi
# Get interface for port communicating locally
LOCAL=$(ifconfig | grep -B1 10.10 | grep -o "^\w*")
mkdir traffic
# Update IP
sudo ifconfig $LOCAL 10.10.$1.$2
# Add rules to make sure traffic is routed through firewall
sudo ip route add 10.10.1.0/24 via 10.10.$1.100 dev $LOCAL
# softwares required for traffic generation and measurements.
sudo apt update
sudo apt install -y socat

Просмотреть файл

@ -12,41 +12,50 @@ if [ $1 -gt 8 ] || [ $1 -lt 5 ]; then
exit 1
fi
# Get interface for port internet traffic
INT=$(ifconfig | grep -B1 128.* | grep -o "^\w*")
# Get interface for port communicating locally
LOCAL=$(ifconfig | grep -B1 10.10 | grep -o "^\w*")
# Update Local IP
sudo ifconfig $LOCAL 10.10.1.$1
# softwares required for local verifier and measurements.
sudo apt update
sudo apt install -y socat
sudo apt install -y python3-pip
pip3 install psutil
# Automate setup without prompt
echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
sudo apt install -y iptables-persistent
sudo apt install -y conntrack conntrackd keepalived
# Get the repository
git clone --recurse-submodules https://github.com/microsoft/aaragog.git
# Get the conntrackd primary-backup setup file
sudo cp /usr/share/doc/conntrackd/examples/sync/primary-backup.sh /etc/conntrackd/
# Setup and Compile the local verifier
cd $HOME/aaragog/C++Verifier/
chmod u+x setup.sh
./setup.sh
make
# Get the conntrackd configuration file
sudo cp $HOME/aaragog/firewall/conntrackd.conf /etc/conntrackd/
# Get the keepalived configuration file
sudo cp $HOME/aaragog/firewall/keepalived.conf /etc/keepalived/
# Get the firewall rules file
sudo cp $HOME/aaragog/firewall/rules.v4 /etc/iptables/
# Automatically replace the internface name in rules and configuration files
sudo sed -i -e "s/enp1s0/$INT/g" /etc/iptables/rules.v4
sudo sed -i -e s/enp1s0d1/$LOCAL/g /etc/conntrackd/conntrackd.conf
@ -83,6 +92,7 @@ elif [ $1 -eq 8 ]; then
sudo sed -i -e s/priority\ P/priority\ 50/g /etc/keepalived/keepalived.conf
fi
# Install rules
sudo iptables-restore < /etc/iptables/rules.v4
cd /etc/keepalived
nohup sudo keepalived -l &

Просмотреть файл

@ -5,11 +5,15 @@ if [ "$#" -ne 1 ]; then
exit 1
fi
# Get interface for port internet traffic
INT=$(ifconfig | grep -B1 128.* | grep -o "^\w*")
# Get interface for port communicating locally
LOCAL=$(ifconfig | grep -B1 10.10 | grep -o "^\w*")
# Update Local IP
sudo ifconfig $LOCAL 10.10.1.$1
# softwares required for global verifier and measurements.
sudo apt update
sudo apt install -y maven
sudo apt install -y openjdk-8-jdk-headless
@ -18,16 +22,20 @@ pip3 install psutil
cd
# Install Flink
wget https://archive.apache.org/dist/flink/flink-1.9.3/flink-1.9.3-bin-scala_2.11.tgz
tar -xzf flink-1.9.3-bin-scala_2.11.tgz
# Install Kafka
cd
wget https://ftp.wayne.edu/apache/kafka/2.5.0/kafka_2.12-2.5.0.tgz
tar -xzf kafka_2.12-2.5.0.tgz
# Get the repository
git clone --recurse-submodules https://github.com/microsoft/aaragog.git
# Compile the global verifier
cd aaragog/verification
mvn clean package
@ -35,6 +43,7 @@ cd
cd kafka_2.12-2.5.0/
# Set the Kafka listener IP
sed -i -e s@\#listeners=PLAINTEXT://@listeners=PLAINTEXT://10.10.1.$1@g $HOME/kafka_2.12-2.5.0/config/server.properties
cd

Просмотреть файл

@ -4,14 +4,18 @@ if [ "$#" -ne 1 ]; then
echo "Illegal number of parameters"
exit 1
fi
# Get interface for port communicating locally
LOCAL=$(ifconfig | grep -B1 10.10 | grep -o "^\w*")
mkdir traffic
# Update IP
sudo ifconfig $LOCAL 10.10.1.$1
# Add rules to make sure traffic is routed through firewall
sudo ip route add 10.10.4.0/24 via 10.10.1.100 dev $LOCAL
sudo ip route add 10.10.5.0/24 via 10.10.1.50 dev $LOCAL
# softwares required for traffic generation and measurements.
sudo apt update
sudo apt install -y socat
sudo apt install -y python3-pip

Просмотреть файл

@ -6,6 +6,7 @@ then
exit 1
fi
# Get the cloudlab server list.
mapfile -t myArray < servers.txt
# printf '%s\n' "${myArray[@]}"
@ -18,6 +19,8 @@ if [ $NUMSERVERS -lt 16 ]; then
exit 1
fi
# Setup for internal traffic servers
for i in 0 1 2 3
do
scp internalSetup.sh ${myArray[$i]}:./
@ -26,12 +29,14 @@ do
ssh ${myArray[$i]} "cd traffic; chmod u+x start_background.sh"
done
# Setup for firewall servers
for i in 4 5 6 7
do
scp firewallSetup.sh ${myArray[$i]}:./
ssh ${myArray[$i]} "chmod u+x firewallSetup.sh; ./firewallSetup.sh $(($i+1))"
done
# Setup for external traffic servers with IP 10.10.4.X
for i in 8 9
do
scp externalSetup.sh ${myArray[$i]}:./
@ -40,6 +45,7 @@ do
ssh ${myArray[$i]} "cd traffic; chmod u+x start_background.sh"
done
# Setup for external traffic servers with IP 10.10.5.X
for i in 10 11
do
scp externalSetup.sh ${myArray[$i]}:./
@ -48,5 +54,7 @@ do
ssh ${myArray[$i]} "cd traffic; chmod u+x start_background.sh"
done
# Setup for the global verfier
scp globalSetup.sh ${myArray[12]}:./
ssh ${myArray[12]} "chmod u+x globalSetup.sh; ./globalSetup.sh 10"

Просмотреть файл

@ -1,6 +1,6 @@
{
"fields": [
{"time" : 4},
{"time" : 8},
{"event_type" : 1},
{"transport_protocol" : 1},
{"flow_state" : 1},

Двоичные данные
firewall/Images/DFtoplogy.png Normal file

Двоичный файл не отображается.

После

Ширина:  |  Высота:  |  Размер: 41 KiB

23
firewall/README.md Normal file
Просмотреть файл

@ -0,0 +1,23 @@
# Experiment setup and settings for distributed firewall
The setup file `aaragog/Setup/setup.sh` installs IP route rules and the required software on each machine. The IP route rules enable us to overlay the topology given below on top of the star topology. Comments are added to the setup file as well.
After running the setup, our topology looks like this:
![Distributed Firewall](./Images/DFtoplogy.png)
We have four external nodes, four internal nodes on a single LAN, and four firewall nodes interposing between the two groups.
The firewalls are configured as two high-availability groups with two primaries and two hot standbys (backups).
Each primary-standby group shares a virtual IP with the VRRP protocol.
We base the traffic between external nodes and internal nodes on the traces provided in [DCTCP](https://github.com/NofelYaseen/TrafficGenerator).
The rules for firewall are simple.
Internal nodes can communicate with each other and initiate connection to external nodes.
External nodes cannot initiate connection to internal nodes.
The firewall nodes are also running the verifier, which performs filtering and suppression.
Global verifier node is running Apache Kafka and Apache Flink.
Kafka is responsible for receiving and pipelining the events from all the local verifiers.
Flink is running the global verifier. Flink allows us to scale up to more machines and provide fault tolerance.

Просмотреть файл

@ -123,8 +123,6 @@ def main():
# print("Number of bytes: ", len(tempOut))
local_connection.send(tempOut)
# if (count > 10):
# sys.exit(0)
elif flag & (select.POLLIN | select.POLLPRI):
@ -217,25 +215,6 @@ def parse_line(line):
index += 1
flowList.append(flow['dstIp'])
# if flow['proto'] == 'icmp':
# flow['type'] = int(current_entry[index].split('=')[1])
# index +=1
# flowList.append(flow['type'])
# flow['code'] = int(current_entry[index].split('=')[1])
# index +=1
# flowList.append(flow['code'])
# flow['id'] = int(current_entry[index].split('=')[1])
# index +=1
# flowList.append(flow['id'])
# else:
# flowList.append('')
# flowList.append('')
# flowList.append('')
if 'sport' in current_entry[index]:
flow['srcL4Port'] = int(current_entry[index].split('=')[1])
index +=1
@ -252,90 +231,20 @@ def parse_line(line):
flow['dstL4Port'] = 0
flowList.append(0)
# if 'REPLIED' in current_entry[index]:
# flow['replied'] = current_entry[index][1:-1]
# index += 1
# flowList.append(flow['replied'])
# else:
# flowList.append('')
# flow['reverse_src_ip'] = current_entry[index].split('=')[1]
# index += 1
# flowList.append(flow['reverse_src_ip'])
# flow['reverse_dst_ip'] = current_entry[index].split('=')[1]
# index += 1
# flowList.append(flow['reverse_dst_ip'])
# if flow['proto'] == 'icmp':
# flow['reverse_type'] = int(current_entry[index].split('=')[1])
# index +=1
# flowList.append(flow['reverse_type'])
# flow['reverse_code'] = int(current_entry[index].split('=')[1])
# index +=1
# flowList.append(flow['reverse_code'])
# flow['reverse_id'] = int(current_entry[index].split('=')[1])
# index +=1
# flowList.append(flow['reverse_id'])
# else:
# flowList.append('')
# flowList.append('')
# flowList.append('')
# if index < len(current_entry) and 'sport' in current_entry[index]:
# flow['reverse_src_port'] = int(current_entry[index].split('=')[1])
# index +=1
# flowList.append(flow['reverse_src_port'])
# else:
# flowList.append('')
# if index < len(current_entry) and 'dport' in current_entry[index]:
# flow['reverse_dst_port'] = int(current_entry[index].split('=')[1])
# index +=1
# flowList.append(flow['reverse_dst_port'])
# else:
# flowList.append('')
# if index < len(current_entry) and 'ASSURED' in current_entry[index]:
# flow['assured'] = current_entry[index][1:-1]
# index += 1
# flowList.append(flow['assured'])
# else:
# flowList.append('')
flow['location'] = int(sys.argv[1])
flowList.append(flow['location'])
# print("flow:\n", flow)
flowByteForm = [struct.pack("f", flow['time'])]
# print(flowByteForm)
flowByteForm = [struct.pack("d", flow['time'])]
flowByteForm.append(struct.pack("B", events_dict[flow['event_type']]))
# print(flowByteForm)
flowByteForm.append(struct.pack("B", flow['transport_protocol']))
# print(flowByteForm)
flowByteForm.append(struct.pack("B", flow_state_dict[flow['flow_state']]))
# print(flowByteForm)
flowByteForm.append(socket.inet_aton(flow['srcIp']))
# print(flowByteForm)
flowByteForm.append(socket.inet_aton(flow['dstIp']))
# print(flowByteForm)
flowByteForm.append(struct.pack("H", flow['srcL4Port']))
# print(flowByteForm)
flowByteForm.append(struct.pack("H", flow['dstL4Port']))
# print(flowByteForm)
flowByteForm.append(struct.pack("B", flow['location']))
# print(flowByteForm)
# print(b''.join(flowByteForm))
# print(len(flowByteForm))
# print(len(b''.join(flowByteForm)))
# print("Sending out byte form:", flowByteForm)
# print("Sending out list form: ", flowList)
# sys.exit(0)
return b''.join(flowByteForm)
def createDict():

Просмотреть файл

@ -1,6 +1,6 @@
{
"fields": [
{"time" : 4},
{"time" : 8},
{"event_type" : 1},
{"transport_protocol" : 1},
{"flow_state" : 1},

49
verification/README.md Normal file
Просмотреть файл

@ -0,0 +1,49 @@
# Global verifier
## Verifier.java
Arguments for running the global verifier
| Argument | options | |
|-------------------------|:---------------------:|:---------------------------------------------------------------------------------------------------------------------------|
| -h, --help | | show this help message and exit |
| --config_dir | CONFIG_DIR | config directory (default: out/) |
| --out_path | OUT_PATH | Output file (default: ../out/) |
| --mode | MODE | Mode: LOCAL_FILE, LOCAL_SOCKET, LOCAL_FOLDER, GLOBAL_KAFKA, STANDALONE_FILE, STANDALONE_FOLDER (default: STANDALONE_FILE) |
| --input_file | INPUT_FILE | Input file needed for LOCAL or STANDALONE (default: ../notebook/38-38.0.001000.csv) |
| --input_folder | INPUT_FOLDER | Input folder needed for global (default: out/globalInput/) |
| --broker_address | BROKER_ADDRESS | IP:Port,... for kafka broker (default: localhost:9092) |
| --local_socket_ip | LOCAL_SOCKET_IP | IP for reading from a local socket (default: localhost) |
| --local_socket_port | LOCAL_SOCKET_PORT | Port for reading from a local socket (default: 10000) |
| --parser | PARSER | Select parser from generic or firewall (default: firewall) |
| --channel_per_invariant | CHANNEL_PER_INVARIANT | Channel Per Invariant (default: 10) |
The program creates state machines according to the invariants provided and processes them.
The input packets can come from a file, a socket, or Kafka. It parses each packet, processes it, and raises an alert if required. Everything is done in streams to allow parallelism.
Uses the `parser/ParserFactory.Java` to create the packet parser. Uses the `GlobalSFAParser` to process the packets and SFA. Finally, outputs the alerts to file.
## GlobalSFAProcessor.Java
Takes the invariant (SFA + transformation of the packets) and a key for the constructor. The function `processElement` takes the packets as input, stores each packet to make sure packets are processed in the right order, makes the transitions in the SFA, and outputs the alerts (Notification).
## parser/ParserFactory.Java
It contains a generic way to parse packets according to `packetformat.json`, but you can add custom parsers as well.
## parser/GenericParser.Java
Takes the `packetformat.json` and a delimiter for the constructor. Creates the parse tree. The parse tree consists of field nodes and conditional nodes. Field nodes are simple parsing nodes, and conditional nodes decide which branch of the tree to take.
## utils/Packet.Java
Class to store a packet. It contains functions to set and get fields in the packet, as well as a function to convert it into string format.
## dsfa/GlobalSFAParser.Java
Creates the SFA and transformation and returns a GlobalSFAProcessor.
## expressions/ExprBuilder.Java
Contains the functions to create the SFA from the parsed tree. The parsed tree contains many kinds of nodes (binary/boolean operators). This file goes through them to create the SFA.

Просмотреть файл

@ -57,17 +57,31 @@ public class Verifier {
argparser.addArgument("--broker_address").setDefault("localhost:9092").help("IP:Port,... for kafka broker");
argparser.addArgument("--local_socket_ip").setDefault("localhost").help("IP for reading from a local socket");
argparser.addArgument("--local_socket_port").setDefault("10000").help("Port for reading from a local socket");
argparser.addArgument("--parser").setDefault("firewall").help("Select parser");
argparser.addArgument("--parser").setDefault("firewall").help("Select parser from generic or firewall");
argparser.addArgument("--delimiter").setDefault("comma").help("Select parser delimiter from comma or semi-colon. Feel free to add more if neccessary");
argparser.addArgument("--channel_per_invariant").setDefault("10").help("Channel Per Invariant");
Namespace ns = argparser.parseArgs(args);
// Get all execution parameters
String configDir = ns.getString("config_dir");
// String formatFile = configDir + "packetformat.json";
String formatFile = configDir + "packetformat.json";
String outputPath = "file://" + System.getProperty("user.dir") + "/"
+ ns.getString("out_path");
String delimiter = "";
switch (ns.getString("delimiter")) {
case "comma":
delimiter = ",";
break;
case "semicolon":
delimiter = ";";
break;
default:
throw new RuntimeException("Unknown delimiter");
}
String parser = ns.getString("parser");
@ -77,10 +91,10 @@ public class Verifier {
mode = Mode.LOCAL_FILE;
break;
case "LOCAL_FOLDER":
mode = Mode.LOCAL_SOCKET;
mode = Mode.LOCAL_FOLDER;
break;
case "LOCAL_SOCKET":
mode = Mode.LOCAL_FOLDER;
mode = Mode.LOCAL_SOCKET;
break;
case "GLOBAL_KAFKA":
mode = Mode.GLOBAL_KAFKA;
@ -96,10 +110,11 @@ public class Verifier {
}
System.out.println("config directory: " + configDir);
// System.out.println("format file: " + formatFile);
System.out.println("format file: " + formatFile);
System.out.println("output folder path: " + outputPath);
System.out.println("mode: " + mode);
System.out.println("parser: " + parser);
System.out.println("delimiter: " + delimiter);
String inputPath;
if (mode == Mode.LOCAL_FILE || mode == Mode.STANDALONE_FILE) {
@ -204,7 +219,7 @@ public class Verifier {
// System.out.println("Putting in parsedPacketsDict: " + entry.getKey());
parsedPacketsDict.put(entry.getKey(), new ArrayList<DataStream<Packet>>());
}
parsedPacketsDict.get(entry.getKey()).add(env.addSource(kafka).flatMap(ParserFactory.createNewParser(parser)));
parsedPacketsDict.get(entry.getKey()).add(env.addSource(kafka).flatMap(ParserFactory.createNewParser(parser, formatFile, delimiter)));
}
}
@ -230,7 +245,7 @@ public class Verifier {
// PacketKeySelector pKeySelector = new PacketKeySelector(filename);
sKeySelector.put(filename,new StringKeySelector(filename));
parsedPacketsDict.get(invName).add(env.readTextFile(filePath).setParallelism(1).partitionCustom(customPartitioner, sKeySelector.get(filename))
.flatMap(ParserFactory.createNewParser(parser)));
.flatMap(ParserFactory.createNewParser(parser, formatFile, delimiter)));
// parsedPacketsDict.get(invName).add(env.readTextFile(filePath).setParallelism(1)
// .flatMap(ParserFactory.createNewParser(parser)));
System.out.print(" Adding stream: ");
@ -238,7 +253,7 @@ public class Verifier {
}
System.out.println("Out of loop");
} else if (mode == Mode.LOCAL_SOCKET) {
parsedPackets = env.socketTextStream(ns.getString("local_socket_ip") , Integer.valueOf(ns.getString("local_socket_port"))).setParallelism(1).flatMap(ParserFactory.createNewParser(parser));;
parsedPackets = env.socketTextStream(ns.getString("local_socket_ip") , Integer.valueOf(ns.getString("local_socket_port"))).setParallelism(1).flatMap(ParserFactory.createNewParser(parser, formatFile, delimiter));
} else {
throw new RuntimeException("Unknown Mode and Data Source: " + mode);
}

Просмотреть файл

@ -0,0 +1,196 @@
package org.msr.mnr.verification.parser;
import java.io.FileReader;
import java.io.IOException;
import java.io.Serializable;
import java.net.DatagramPacket;
import java.util.*;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.util.Collector;
import org.msr.mnr.verification.utils.Packet;
import org.msr.mnr.verification.utils.ParseIntArray;
/**
 * Flink {@link FlatMapFunction} that parses delimited text lines into
 * {@link Packet} objects according to a JSON packet-format specification
 * (the "fields" array of packetformat.json).
 *
 * The format file is compiled once, in the constructor, into a linked parse
 * tree of {@link Field} nodes (consume one token) and {@link ConditionalNode}
 * nodes (choose a sub-tree based on an already-parsed field's value).
 */
public class GenericParser implements FlatMapFunction<String, Packet> {
    private static final long serialVersionUID = 1L;

    // Head of the parse tree built from the format file; stays null if loading failed.
    private ParseNode root = null;
    // Token separator for each input line; passed to String.split(), so it is
    // interpreted as a regex (fine for the current "," / ";" choices).
    private String delimiter;

    /**
     * @param formatFile path to the packetformat.json describing the field layout
     * @param _delimiter delimiter used to split each input line into tokens
     */
    public GenericParser(String formatFile, String _delimiter) {
        JSONParser jsonParser = new JSONParser();
        this.delimiter = _delimiter;
        try (FileReader reader = new FileReader(formatFile)) {
            JSONObject config = (JSONObject) jsonParser.parse(reader);
            // Parse the packet format
            JSONArray fieldFormat = (JSONArray) config.get("fields");
            root = parseFormat(fieldFormat, null);
        } catch (IOException | ParseException e) {
            // NOTE(review): on failure `root` stays null and the first call to
            // flatMap() will NPE; consider failing fast (rethrow) instead.
            e.printStackTrace();
        }
    }

    /**
     * Recursively builds a chain of ParseNodes from a JSON "fields" array.
     *
     * A one-entry object is a plain field ({"name": width}); an object with
     * several entries is treated as a conditional whose keys are conditions
     * (e.g. "proto=6" or "default") and whose values are nested field arrays.
     *
     * @param fieldFormat JSON array of field/conditional descriptors
     * @param last        node to append the first new node to (may be null)
     * @return the first node created for this array (head of the sub-chain)
     */
    private ParseNode parseFormat(JSONArray fieldFormat, ParseNode last) {
        ParseNode first = null;
        for (Object nodeObj : fieldFormat) {
            JSONObject node = (JSONObject) nodeObj;
            if (node.size() > 1) {
                ConditionalNode cn = new ConditionalNode(last);
                for (Object conditionStr : node.keySet()) {
                    Condition cond = new Condition((String) conditionStr);
                    JSONArray child = (JSONArray) node.get(conditionStr);
                    // Children start their own chain (null), so a branch does not
                    // link back into the main chain; flow resumes via cn.next.
                    cn.children.put(cond, parseFormat(child, null));
                }
                last = cn;
            } else if (node.size() == 1) {
                Object entryObj = node.entrySet().iterator().next();
                Map.Entry<?, ?> entry = Map.Entry.class.cast(entryObj);
                last = new Field(entry.getKey(), last);
            } else {
                throw new IllegalArgumentException("Empty field object?");
            }
            if (first == null) {
                first = last;
            }
        }
        return first;
    }

    /**
     * Parses one input line into a Packet and emits it.
     * Comment lines ("#"), empty lines, and header lines beginning with "time"
     * are skipped.
     */
    @Override
    public void flatMap(String data, Collector<Packet> out) throws IOException {
        if (data.startsWith("#") || data.length() == 0 || data.startsWith("time")) {
            return;
        }
        // System.out.println("Input line: " + data);
        Packet p = new Packet();
        String[] inputLine = data.split(delimiter);
        parsePacketHelper(inputLine, 0, p, root);
        out.collect(p);
    }

    /**
     * Walks the parse tree, consuming tokens from {@code data} and storing the
     * parsed values into {@code p}.
     *
     * @param data    tokenized input line
     * @param index   index of the next unconsumed token
     * @param p       packet being populated (mutated in place)
     * @param current node to apply at this position
     * @return index of the next unconsumed token after this node (and its chain)
     */
    private int parsePacketHelper(String[] data, int index, Packet p, ParseNode current) {
        int nextIndex = index;
        if (current instanceof Field) {
            // If it's a simple Field, parse it and set nextIndex
            Field f = (Field) current;
            nextIndex += 1;
            // System.out.println("Name: " + f.name);
            if (f.name.equals("time")) {
                if (isNumeric(data[index])) {
                    // Numeric timestamps are converted from (fractional) seconds
                    // to whole milliseconds.
                    p.setTime(Math.round(Double.parseDouble(data[index]) * 1000.0));
                } else {
                    p.setTime(data[index]);
                }
            } else if (isNumeric(data[index])) {
                p.set(f.name, ParseIntArray.fromString(data[index]));
            } else {
                p.set(f.name, data[index].getBytes(), 0, data[index].length());
            }
        } else {
            // If it's a conditional, traverse the correct child
            ConditionalNode cn = (ConditionalNode) current;
            ParseNode child = cn.getChildNode(p);
            if (child != null) {
                nextIndex = parsePacketHelper(data, index, p, child);
            }
        }
        if (current.next != null) {
            nextIndex = parsePacketHelper(data, nextIndex, p, current.next);
        }
        return nextIndex;
    }

    /**
     * Base node of the parse tree. Linking happens in the constructor: a new
     * node appends itself as {@code last.next}. Serializable because Flink
     * serializes the enclosing function (inner classes capture the outer ref).
     */
    private abstract class ParseNode implements Serializable {
        private static final long serialVersionUID = 1L;
        public ParseNode next;

        ParseNode(ParseNode last) {
            if (last != null) {
                last.next = this;
            }
        }
    }

    /** Leaf node: consumes exactly one token and stores it under {@code name}. */
    private class Field extends ParseNode {
        private static final long serialVersionUID = 1L;
        public String name;

        Field(Object name, ParseNode last) {
            super(last);
            this.name = (String) name;
        }
    }

    /** Branch node: picks a child chain based on a previously parsed field. */
    private class ConditionalNode extends ParseNode {
        private static final long serialVersionUID = 1L;
        // LinkedHashMap keeps the format file's condition order, so conditions
        // are evaluated in declaration order and "default" can act as a catch-all.
        LinkedHashMap<Condition, ParseNode> children = new LinkedHashMap<Condition, ParseNode>();

        ConditionalNode(ParseNode last) {
            super(last);
        }

        ParseNode getChildNode(Packet p) {
            for (Condition c : children.keySet()) {
                // A null field marks the "default" condition: always matches.
                if (c.field == null) {
                    return children.get(c);
                }
                // NOTE(review): c.value is an int[]; if Packet.get() also returns
                // an int[], equals() here is reference equality and never matches.
                // Confirm Packet.get()'s return type has value equality, otherwise
                // this needs Arrays.equals().
                if (p.get(c.field).equals(c.value)) {
                    return children.get(c);
                }
            }
            return null;
        }
    }

    /**
     * One branch condition, parsed from a string of the form "field=value"
     * (hex values prefixed with 0x, decimal, or literal string) or the
     * special keyword "default".
     */
    private class Condition implements Serializable {
        private static final long serialVersionUID = 1L;
        String field;
        int[] value;

        Condition(String condition) {
            if (condition.equalsIgnoreCase("default")) {
                field = null;
                value = null;
            } else {
                String[] conditionTokens = condition.split("=");
                assert (conditionTokens.length == 2);
                field = conditionTokens[0];
                if (conditionTokens[1].startsWith("0x")) {
                    value = ParseIntArray.fromHexString(conditionTokens[1].substring(2));
                } else if (GenericParser.isNumeric(conditionTokens[1])) {
                    value = ParseIntArray.fromLong(Long.parseLong(conditionTokens[1]));
                } else {
                    value = ParseIntArray.fromString(conditionTokens[1]);
                }
            }
        }
    }

    /**
     * Returns true if {@code inputStr} parses as a double; used to decide
     * whether a token is stored numerically or as raw bytes.
     */
    public static boolean isNumeric(String inputStr) {
        if (inputStr == null) {
            return false;
        }
        try {
            // Result intentionally discarded: only parseability matters.
            // (Fixes the previous dead store to an unused local.)
            Double.parseDouble(inputStr);
        } catch (NumberFormatException e) {
            return false;
        }
        return true;
    }
}

Просмотреть файл

@ -10,6 +10,8 @@ public class ParserFactory {
return null;
} else if(parserType.equalsIgnoreCase("firewall")){
return new FirewallParser();
} else if (parserType.equalsIgnoreCase("generic")){
return new GenericParser(configFile, delimiter);
}
return null;