What are the system requirements for comfortably running IBM Cloud Private Community Edition (ICP CE)?

I'm trying to run IBM Cloud Private.
I saw the Hardware requirements and recommendations page, but I'm not sure the spec is enough for me.
I'd like to run several Cloud Foundry applications, MessageSight, Spinnaker, and so on.
What do you think of this spec?
CPU: 3 GHz, 10 cores
Mem: 64 GB
HDD: 2 TB (SSD)

I wrote a Terraform script for this, together with CentOS 7.6.
The minimum configuration I use is the following, counting 250 GB as the minimum disk size.
This applies to ICP CE 3.1.1.
##### ICP Cluster Components #####
master = {
  nodes                 = "3"
  vcpu                  = "8"
  memory                = "16384"
  docker_disk_size      = "250"
  thin_provisioned      = "true"
  thin_provisioned_etcd = "false"
}
proxy = {
  nodes            = "3"
  vcpu             = "4"
  memory           = "8192"
  thin_provisioned = "true"
}
worker = {
  nodes            = "3"
  vcpu             = "8"
  memory           = "8192"
  thin_provisioned = "true"
}
management = {
  nodes            = "3"
  vcpu             = "4"
  memory           = "16384"
  thin_provisioned = "true"
}
va = {
  nodes            = "2"
  vcpu             = "4"
  memory           = "8192"
  thin_provisioned = "true"
}
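For a rough comparison against your single-machine spec, the totals implied by this minimal multi-node layout (computed from the values above) are:
vCPU:   3x8 + 3x4 + 3x8 + 3x4 + 2x4 = 80 vCPUs
Memory: 3x16 + 3x8 + 3x8 + 3x16 + 2x8 = 160 GB
Disk:   14 nodes x 250 GB minimum = 3.5 TB
so this layout is sized as 14 VMs rather than a single 10-core / 64 GB / 2 TB box.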

Related

How can I integrate INET gPTP with INET TAS?

I would like to integrate two sample configurations (TAS and gPTP) into the One Master Clock network environment.
The .ini and .ned files are shown below.
simpleGptp.ini
[General]
network = OneMasterClockGptpShowcase
**.displayGateSchedules = true
**.gateFilter = "**.eth[1].**"
**.gateScheduleVisualizer.height = 16
**.gateScheduleVisualizer.placementHint = "top"
# client applications
*.tsnDevice1.numApps = 2
*.tsnDevice1.app[*].typename = "UdpSourceApp"
*.tsnDevice1.app[0].display-name = "best effort"
*.tsnDevice1.app[1].display-name = "video"
*.tsnDevice1.app[*].io.destAddress = "tsnDevice2"
*.tsnDevice1.app[0].io.destPort = 1000
*.tsnDevice1.app[1].io.destPort = 1001
*.tsnDevice1.app[*].source.packetLength = 1000B - 54B # 54B = 8B (UDP) + 20B (IP) + 14B (ETH MAC) + 4B (ETH FCS) + 8B (ETH PHY)
*.tsnDevice1.app[0].source.productionInterval = exponential(200us) # ~40Mbps
*.tsnDevice1.app[1].source.productionInterval = exponential(400us) # ~20Mbps
# server applications
*.tsnDevice2.numApps = 2
*.tsnDevice2.app[*].typename = "UdpSinkApp"
*.tsnDevice2.app[0].io.localPort = 1000
*.tsnDevice2.app[1].io.localPort = 1001
# enable outgoing streams
*.tsnDevice1.hasOutgoingStreams = true
# client stream identification
*.tsnDevice1.bridging.streamIdentifier.identifier.mapping = [{stream: "best effort", packetFilter: expr(udp.destPort == 1000)},
{stream: "video", packetFilter: expr(udp.destPort == 1001)}]
# client stream encoding
*.tsnDevice1.bridging.streamCoder.encoder.mapping = [{stream: "best effort", pcp: 0},
{stream: "video", pcp: 4}]
# enable egress traffic shaping
*.tsnSwitch.hasEgressTrafficShaping = true
# time-aware traffic shaping
*.tsnSwitch.eth[*].macLayer.queue.numTrafficClasses = 2
*.tsnSwitch.eth[*].macLayer.queue.*[0].display-name = "best effort"
*.tsnSwitch.eth[*].macLayer.queue.*[1].display-name = "video"
*.tsnSwitch.eth[*].macLayer.queue.transmissionGate[0].offset = 0ms
*.tsnSwitch.eth[*].macLayer.queue.transmissionGate[0].durations = [4ms, 6ms] # lengths of the open/closed periods; the gate cycle is 10ms
*.tsnSwitch.eth[*].macLayer.queue.transmissionGate[1].offset = 6ms
*.tsnSwitch.eth[*].macLayer.queue.transmissionGate[1].durations = [2ms, 8ms]
# enable time synchronization in all network nodes
*.*.hasTimeSynchronization = true
# all oscillators have a random constant drift
**.oscillator.typename = "ConstantDriftOscillator"
**.oscillator.driftRate = uniform(-100ppm, 100ppm)
# all Ethernet interfaces have 100 Mbps speed
*.*.eth[*].bitrate = 100Mbps
*.visualizer.typename = "IntegratedMultiCanvasVisualizer"
*.visualizer.infoVisualizer.displayInfos = true
# TSN clock gPTP master ports
*.tsnClock.gptp.masterPorts = ["eth0"]
# TSN switch gPTP bridge master ports
*.tsnSwitch.gptp.masterPorts = ["eth1", "eth2"]
# Set all reference clocks to master clock so the time difference can be visualized
**.referenceClock = "tsnClock.clock"
# data link visualizer displays gPTP time synchronization packets
*.visualizer.dataLinkVisualizer[0].displayLinks = true
*.visualizer.dataLinkVisualizer[0].activityLevel = "protocol"
*.visualizer.dataLinkVisualizer[0].packetFilter = "GptpSync"
*.visualizer.dataLinkVisualizer[0].lineColor = "blue2"
*.visualizer.numInfoVisualizers = 3
*.visualizer.infoVisualizer[0].modules = "*.tsnClock.clock"
*.tsnClock.clock.displayStringTextFormat = "time: %T"
*.visualizer.infoVisualizer[1].modules = "*.tsnSwitch.clock"
*.visualizer.infoVisualizer[1].placementHint = "topLeft"
*.visualizer.infoVisualizer[2].modules = "*.tsnDevice*.clock"
*.visualizer.infoVisualizer[2].placementHint = "bottom"
*.tsnDevice*.clock.displayStringTextFormat = "diff: %d"
*.tsnSwitch.clock.displayStringTextFormat = "diff: %d"
simpleGptp.ned
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program. If not, see http://www.gnu.org/licenses/.
//
package tsn_scalability.simulations;
import inet.common.scenario.ScenarioManager;
import inet.networks.base.TsnNetworkBase;
import inet.node.ethernet.EthernetLink;
import inet.node.tsn.TsnClock;
import inet.node.tsn.TsnDevice;
import inet.node.tsn.TsnSwitch;
network OneMasterClockGptpShowcase extends TsnNetworkBase
{
    submodules:
        tsnClock: TsnClock {
            @display("p=500,150");
        }
        tsnSwitch: TsnSwitch {
            @display("p=500,300");
        }
        tsnDevice1: TsnDevice {
            @display("p=400,450");
        }
        tsnDevice2: TsnDevice {
            @display("p=600,450");
        }
    connections:
        tsnClock.ethg++ <--> EthernetLink <--> tsnSwitch.ethg++;
        tsnSwitch.ethg++ <--> EthernetLink <--> tsnDevice1.ethg++;
        tsnSwitch.ethg++ <--> EthernetLink <--> tsnDevice2.ethg++;
}
Error message
omnetpp::common::expression::ExprNode::eval_error: Object nullptr has no member named 'destPort' -- in module (inet::queueing::PacketMultiplexer) OneMasterClockGptpShowcase.tsnDevice1.bridging.directionReverser.join (id=337), at t=0s, event #13
The problem happens when tsnDevice1 sends the GptpPdelayReq packet and it passes through the bridging module.
If you have any ideas, please let me know. Thank you.
The gPTP packet doesn't contain a UDP header, so you should avoid dereferencing the UDP header if there's none.
For example:
expr(has(udp) && udp.destPort == 1000)
Ideally the gPTP packet is neither part of the video nor the best effort streams.
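Applied to the stream identification mapping from simpleGptp.ini above, a minimal sketch (same stream names and ports, only the has(udp) guard added) could look like this:
# client stream identification (skip packets without a UDP header, e.g. gPTP)
*.tsnDevice1.bridging.streamIdentifier.identifier.mapping = [{stream: "best effort", packetFilter: expr(has(udp) && udp.destPort == 1000)},
                                                             {stream: "video", packetFilter: expr(has(udp) && udp.destPort == 1001)}]
Packets that match neither filter (such as gPTP) then stay outside the two streams, which is what the answer above suggests.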
The example is for store-and-forward switching. If I try to switch to cut-through by adding *.*.hasCutthroughSwitching = true, the simulation ends with the error message: Another packet streaming operation is already in progress -- in module (inet::physicallayer::EthernetPhyHeaderInserter) TsnMultiClient.switch.eth[5].phyLayer.phyHeaderInserter (id=705), at t=0.000000962s, event #87
Could this problem also be solved with such a small tweak?
Thank you very much :)

GitLab runner on AWS EC2: have one EC2 instance launching multiple concurrent jobs

I followed this tutorial: https://docs.gitlab.com/runner/configuration/runner_autoscale_aws/
And it works perfectly! I put the runner on only one branch for testing.
In our pipeline we have about 15 jobs in one stage, but my configuration launches only 2 EC2 machines, so the jobs are picked up only 2 at a time. I want one machine to take 4-5 jobs at the same time.
My jobs in the same stage: https://imgur.com/a/s2HjGML
This is my config.toml
concurrent = 8
check_interval = 0

[session_server]
  session_timeout = 1800

[[runners]]
  name = "gitlab-runner-xxxxx-dev"
  url = "https://gitlab.com/"
  token = "xxxxxxxxx"
  executor = "docker+machine"
  limit = 2
  request_concurrency = 3
  [runners.docker]
    image = "alpine"
    privileged = true
    disable_cache = true
  [runners.cache]
    Type = "s3"
    Shared = true
    [runners.cache.s3]
      ServerAddress = "s3.amazonaws.com"
      AccessKey = "xxxxxxxx"
      SecretKey = "xxxxx"
      BucketName = "gitlab-runner-xxx-bucket"
      BucketLocation = "eu-west-3"
  [runners.machine]
    IdleCount = 0
    IdleTime = 1800
    MaxBuilds = 10
    MachineDriver = "amazonec2"
    MachineName = "gitlab-docker-machine-%s"
    MachineOptions = [
      "amazonec2-access-key=xxxxxx",
      "amazonec2-secret-key=xxxxxxx",
      "amazonec2-region=eu-west-3",
      "amazonec2-vpc-id=vpc-xxxx",
      "amazonec2-subnet-id=subnet-xxxxx",
      "amazonec2-use-private-address=true",
      "amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true",
      "amazonec2-security-group=xxxxx",
      "amazonec2-instance-type=m5.large",
    ]
    [[runners.machine.autoscaling]]
      Periods = ["* * 9-18 * * mon-fri *"]
      IdleCount = 2
      IdleTime = 3600
      Timezone = "UTC"
    [[runners.machine.autoscaling]]
      Periods = ["* * * * * sat,sun *"]
      IdleCount = 1
      IdleTime = 60
      Timezone = "UTC"
Is my config not right for what I want, or is what I want impossible?
Thanks guys!
You've set limit = 2 in your [[runners]] section. limit caps how many jobs that runner entry will handle concurrently (and, with the docker+machine executor, how many autoscaled machines it will create), so your jobs are picked up only 2 at a time.
Set this limit to a higher number to allow more jobs to run concurrently.
See also the relationship between limit, concurrent and IdleCount.
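As a sketch, these are the two settings that interact here (8 is just an illustrative target, not a recommendation):
concurrent = 8        # upper bound on jobs running at once across all [[runners]] in this config file

[[runners]]
  # ...
  limit = 8           # upper bound for this runner entry; raising it from 2 removes the current bottleneck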

Adding Multiple Hosts to the INET/OMNeT++ Throughput Example

I have been working on adding more than one host to the INET throughput example.
inet/showcases/wireless/throughput
However, when I run the code after adding more hosts, the generated graph looks pretty similar to the original, and I expected some sort of obvious difference, which leads me to believe that there is something wrong with the code.
Original example code:
throughput.ini
[General]
[Config Throughput]
network = Throughput
sim-time-limit = 1s
*.*Host.ipv4.arp.typename = "GlobalArp"
*.*Host.wlan[*].mgmt.typename = "Ieee80211MgmtAdhoc"
*.*Host.wlan[*].agent.typename = ""
*.*Host.wlan[*].opMode = "g(erp)"
*.*Host.wlan[*].bitrate = ${bitrate = 6,9,12,18,24,36,48,54}Mbps
*.*Host.wlan[*].mac.dcf.originatorMacDataService.fragmentationPolicy.fragmentationThreshold = 2304B + 28B
*.*Host.wlan[*].radio.separateReceptionParts = true
*.*Host.wlan[*].radio.separateTransmissionParts = true
*.sourceHost.numApps = 1
*.sourceHost.app[0].typename = "UdpBasicApp"
*.sourceHost.app[*].destAddresses = "destinationHost"
*.sourceHost.app[*].destPort = 5000
*.sourceHost.app[*].packetName = "UDPData-"
*.sourceHost.app[*].startTime = 0s
*.sourceHost.app[*].messageLength = ${packetLength = 100, 1000, 2268}byte
*.sourceHost.app[*].sendInterval = ${packetLength} * 8 / ${bitrate} * 1us
*.destinationHost.numApps = 1
*.destinationHost.app[0].typename = "UdpSink"
*.destinationHost.app[*].localPort = 5000
throughput.ned
package inet.showcases.wireless.throughput;
import inet.networklayer.configurator.ipv4.Ipv4NetworkConfigurator;
import inet.node.inet.WirelessHost;
import inet.physicallayer.ieee80211.packetlevel.Ieee80211ScalarRadioMedium;
network Throughput
{
    @display("bgb=6,4");
    @statistic[throughput](source=liveThroughput(destinationHost.app[0].packetReceived)/1000000; record=figure; targetFigure=panel.throughput; checkSignals=false);
    @statistic[numRcvdPk](source=count(destinationHost.app[0].packetReceived); record=figure; targetFigure=panel.numRcvdPkCounter; checkSignals=false);
    @figure[panel](type=panel; pos=1.5,0.1);
    // @figure[panel.throughput](type=gauge; pos=0,0; size=100,100; minValue=0; maxValue=40; tickSize=5; label="App level throughput [Mbps]");
    @figure[panel.throughput](type=linearGauge; pos=250,50; size=250,30; minValue=0; maxValue=54; tickSize=6; label="Application level throughput [Mbps]");
    @figure[panel.numRcvdPkCounter](type=counter; pos=50,50; label="Packets received"; decimalPlaces=6);
    submodules:
        sourceHost: WirelessHost {
            @display("p=3.019269,2.746169");
        }
        destinationHost: WirelessHost {
            @display("p=4.369595,1.8054924");
        }
        configurator: Ipv4NetworkConfigurator {
            @display("p=1.0772266,0.6220604");
        }
        radioMedium: Ieee80211ScalarRadioMedium {
            @display("p=1.03171,1.9723867");
        }
}
The way I did it was to copy the sourceHost submodule in throughput.ned and give it a name like sourceHost2. Then I modified the ini file to accommodate the changes, for example changing
*.*Host.ipv4.arp.typename = "GlobalArp"
to
*.*Host*.ipv4.arp.typename = "GlobalArp"
This runs fine but does not really change the throughput at all: one or two slightly harsher peaks/troughs in the graph, but nothing noticeable. Any ideas?
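For reference, a minimal sketch of the modification described above, assuming the duplicated sender is named sourceHost2 (display coordinates are illustrative). One thing worth checking is that the application keys in the original ini (*.sourceHost.numApps and friends) match only sourceHost, so unless those patterns are widened too, the second host has no app and generates no traffic, which would leave the graph essentially unchanged.
// throughput.ned: duplicated sender submodule
sourceHost2: WirelessHost {
    @display("p=2.0,2.8");
}

# throughput.ini: widen the sender patterns (replacing the existing *.sourceHost.* keys)
*.sourceHost*.numApps = 1
*.sourceHost*.app[0].typename = "UdpBasicApp"
*.sourceHost*.app[*].destAddresses = "destinationHost"
*.sourceHost*.app[*].destPort = 5000
*.sourceHost*.app[*].packetName = "UDPData-"
*.sourceHost*.app[*].startTime = 0s
*.sourceHost*.app[*].messageLength = ${packetLength = 100, 1000, 2268}byte
*.sourceHost*.app[*].sendInterval = ${packetLength} * 8 / ${bitrate} * 1us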

How to use BertForSequenceClassification with max_length set to 1700 tokens?

I want to perform author classification on the Reuters 50 50 dataset, where the max token length is 1600+ tokens and there are 50 classes/authors in total.
With max_length=1700 and batch_size=1, I'm getting RuntimeError: CUDA out of memory. This error can be prevented by setting max_length=512, but this has the unwanted effect of truncating the texts.
Tokenizing and encoding:
from keras.preprocessing.sequence import pad_sequences
from transformers import BertTokenizer

# tokenizer is assumed to be the matching pretrained BERT tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

MAX_LEN = 1700

def get_encodings(texts):
    token_ids = []
    for text in texts:
        token_id = tokenizer.encode(text, add_special_tokens=True, max_length=MAX_LEN)
        token_ids.append(token_id)
    return token_ids

def pad_encodings(encodings):
    return pad_sequences(encodings, maxlen=MAX_LEN, dtype="long",
                         value=0, truncating="post", padding="post")

def get_attention_masks(padded_encodings):
    attention_masks = []
    for encoding in padded_encodings:
        attention_mask = [int(token_id > 0) for token_id in encoding]
        attention_masks.append(attention_mask)
    return attention_masks

train_encodings = get_encodings(train_df.text.values)
train_encodings = pad_encodings(train_encodings)
train_attention_masks = get_attention_masks(train_encodings)

test_encodings = get_encodings(test_df.text.values)
test_encodings = pad_encodings(test_encodings)
test_attention_masks = get_attention_masks(test_encodings)
Packing into Dataset and Dataloader:
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler

X_train = torch.tensor(train_encodings)
y_train = torch.tensor(train_df.author_id.values)
train_masks = torch.tensor(train_attention_masks)

X_test = torch.tensor(test_encodings)
y_test = torch.tensor(test_df.author_id.values)
test_masks = torch.tensor(test_attention_masks)

batch_size = 1

# Create the DataLoader for our training set.
train_data = TensorDataset(X_train, train_masks, y_train)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)

validation_data = TensorDataset(X_test, test_masks, y_test)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
Model setup:
# BertConfig, BertForSequenceClassification and AdamW assumed to come from the transformers package
from transformers import BertConfig, BertForSequenceClassification, AdamW

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

config = BertConfig.from_pretrained(
    'bert-base-uncased',
    num_labels = 50,
    output_attentions = False,
    output_hidden_states = False,
    max_position_embeddings=MAX_LEN
)
model = BertForSequenceClassification(config)
model.to(device)

optimizer = AdamW(model.parameters(),
                  lr = 2e-5,
                  eps = 1e-8)
Training:
for epoch_i in range(0, epochs):
    model.train()
    for step, batch in enumerate(train_dataloader):
        b_texts = batch[0].to(device)
        b_attention_masks = batch[1].to(device)
        b_authors = batch[2].to(device)
        model.zero_grad()
        outputs = model(b_texts,
                        token_type_ids=None,
                        attention_mask=b_attention_masks,
                        labels=b_authors)  # <------- ERROR HERE
Error:
RuntimeError: CUDA out of memory. Tried to allocate 6.00 GiB (GPU 0; 7.93 GiB total capacity; 1.96 GiB already allocated; 5.43 GiB free; 536.50 KiB cached)
Unless you are training on a TPU, your chances of ever having enough GPU RAM with any of the GPUs available right now are extremely low.
For some BERT models, the model alone takes well above 10GB in RAM, and a doubling in sequence length beyond 512 tokens takes about that much more in memory. For reference, a Titan RTX with 24 GB GPU RAM (most of what is currently available for a single GPU), can barely fit 24 samples of 512 tokens in length at the same time.
Fortunately, most of the networks still yield a very decent performance when truncating the samples, but this is of course task-specific. Also keep in mind that - unless you are training from scratch - all of the pre-trained models are generally trained on 512 token limits. To my knowledge, the only model currently supporting longer sequences is Bart, which allows up to 1024 tokens in length.
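If you go the truncation route, here is a minimal sketch of the change, assuming a reasonably recent Hugging Face BertTokenizer (the 512-token cap comes from the pretrained model's position embeddings):
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
MAX_LEN = 512  # limit of the pretrained BERT position embeddings

def get_encodings(texts):
    # Encode each text and truncate it to the model's maximum supported length.
    return [tokenizer.encode(text, add_special_tokens=True,
                             max_length=MAX_LEN, truncation=True)
            for text in texts]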

Making a DoS attack using Veins

I am new to Veins and I would like to make a DoS attack using a flooding technique. I have tried sending a message that is used in case of an accident, say, a million times from a specific car. Is this enough to make a DoS attack? Can I make this code more sophisticated?
void TraCIDemo11p::handlePositionUpdate(cObject* obj) {
    BaseWaveApplLayer::handlePositionUpdate(obj);
    if (externalID == "2") { // 2 is the attacker
        for (int i = 0; i < 1000000; i++)
            sendMessage(mobility->getRoadId());
    }
}
Note: I am using OMNeT++ 5.0, SUMO 0.25.0 and Veins 4.4 (TraCIDemo11p.cc).
For the NED file:
import inet.applications.dhcp.DhcpServer;
import inet.node.dsdv.DsdvRouter;
import inet.node.inet.AdhocHost;
import inet.node.inet.MulticastRouter;
import inet.node.inet.WirelessHost;
import inet.networklayer.configurator.ipv4.Ipv4NetworkConfigurator;
import inet.node.aodv.AodvRouter;
import inet.node.wireless.AccessPoint;
import inet.physicallayer.ieee80211.packetlevel.Ieee80211ScalarRadioMedium;
import inet.visualizer.integrated.IntegratedVisualizer;
import inet.visualizer.networklayer.NetworkRouteVisualizer;
import inet.visualizer.integrated.IntegratedMultiVisualizer;
import ned.DelayChannel;
network pingattack
{
    parameters:
        int numhost;
        int numattacker;
    submodules:
        visualizer: IntegratedMultiVisualizer {
            @display("p=14,295");
        }
        configurator: Ipv4NetworkConfigurator {
            //config = default(xml("<config><interface WirelessHost='**' address='10.0.0.x' netmask='255.255.255.0'/></config>"));
            @display("p=42,430");
        }
        radioMedium: Ieee80211ScalarRadioMedium {
            @display("p=14,339");
        }
        Attacker[numattacker]: WirelessHost {
            @display("p=180,331");
        }
        Master: WirelessHost {
            @display("p=274,316");
        }
        Slaves[numhost]: WirelessHost {
            @display("p=313,247");
        }
        ap: AccessPoint {
            @display("p=244,246");
        }
}
For the INI file:
[General]
description = Displaying Ping Attack
network = pingattack
# Setting up the max area which the modules are able to travel to
# "Z" limits the height. It can only be observed in 3D
**.constraintAreaMinX = 0m
**.constraintAreaMinY = 0m
**.constraintAreaMinZ = 0m
**.updateInterval = 0.1s # test with 0s too, and let getCurrentPosition update the display string from a test module
# Does not specify the initial positions. A random initial position will be chosen within the constraint area,
# unless it is specified in the display string in the NED file or via initialX,Y,Z
**.mobility.initFromDisplayString = false
# Setting the default ARP type to "GlobalArp"
**.arp.typename = "GlobalArp"
# Attacker parameters
*.Attacker[*].numApps = 1 # Number of application layers on attackers
*.Attacker[*].app[0].typename = "PingApp" # Application type for attackers
*.Attacker[*].app[0].destAddr = "Master" # Set ping destination
*.Attacker[*].app[0].startTime = 10s # Initialize start time of ping
# Master Communication
*.Master.numApps = 1 # Number of application layers on master
*.Master.app[0].typename = "PingApp" # Application type for master
# MasterDrone Mobility
*.Master.mobility.typename = "LinearMobility" # Master move at constant speed
*.Master.mobility.speed = 20mps # of 20mps
# Slave Communication
*.Slaves[*].numApps = 1 # Number of application layers on slaves
*.Slaves[*].app[0].typename = "PingApp" # Application type for slaves
*.Slaves[*].app[0].destAddr = "Master" # Set ping destination, to ensure connection with master
*.Slaves[*].app[0].startTime = replaceUnit (0.1*(parentIndex()), "s") # to avoid synchronization
#*.Slaves[*].app[0].sendInterval= 1s # Slaves send ping every 1 second
# Slave mobility
*.Slaves[*].mobility.typename = "MassMobility" # Slaves move randomly
*.Slaves[*].mobility.changeInterval = truncnormal(2s, 0.5s)
*.Slaves[*].mobility.angleDelta = normal(0deg, 30deg)
#*.Slaves[*].mobility.speed = 15mps
# Wlan Config
*.Master.wlan[*].radio.transmitter.power = 10mW # Setting up Master, slaves, attacker wlan transmit power
*.ap.wlan[*].radio.transmitter.power = 100mW
# Pcap recording
**.crcMode = "computed" # To include CRC values in capture files
**.fcsMode = "computed" # To include FCS values in capture files
**.numPcapRecorders = 1 # To include PcapRecordere module
**.pcapRecorder[*].pcapNetwork = 105 # Set PCAP files link-layer header type to 802.11
**.pcapRecorder[*].pcapFile = "results/all.pcap" # Specifying file to write traces in & enable packet capture
**.pcapRecorder[*].verbose = true # To print tcpdump-like textual information to the log (EV)
**.pcapRecorder[*].alwaysFlush = true # Record the packets even if simulation crashes
**.pcapRecorder[*].packetFilter = "ping*"
#Analysis
*.*.wlan[*].**.vector-recording = false
# Visualizer parameters
# Displaying network path activity
*.visualizer.*.numDataLinkVisualizers = 2
*.visualizer.*.numInterfaceTableVisualizers = 2
*.visualizer.*.dataLinkVisualizer[0].displayLinks = true
*.visualizer.*.dataLinkVisualizer[0].packetFilter = "*ping*"
#*.visualizer.*.physicalLinkVisualizer[*].displayLinks = true
#*.visualizer.*.interfaceTableVisualizer[*].displayInterfaceTables = true
#*.visualizer.*.interfaceTableVisualizer[0].format = "%4"
*.visualizer.*.infoVisualizer[0].modules = "*.*.app[0]"
*.visualizer.*.infoVisualizer[1].modules = "*.*.app[1]"
#*.visualizer.*.infoVisualizer[*].format = "%t"
#*.visualizer.*.statisticVisualizer[0].sourceFilter = "**.app[*]"
#*.visualizer.*.statisticVisualizer[0].signalName = "rtt"
#*.visualizer.*.statisticVisualizer[0].unit = "ms"
#*.visualizer.*.infoVisualizer[*].placementHint = "topCenter"
#*.visualizer.*.packetDropVisualizer[*].displayPacketDrops = true
#*.visualizer.*.packetDropVisualizer[*].packetFilter = "ping*"
#*.visualizer.*.packetDropVisualizer[*].labelFormat = "%n/reason: %r"
#*.visualizer.*.packetDropVisualizer[*].fadeOutTime = 3s
#edit
# Set number of Attacker
# Set number of Host
# Set Attacker Ping interval
# Set Transmission Power of Master, Slave, Attacker and AP
# Set Max constraint area XYZ
sim-time-limit = 20s
pingattack.numhost = 5
pingattack.numattacker = 1
*.Attacker[*].app[0].sendInterval = 0.0001s
**.constraintAreaMaxX = 2000m
**.constraintAreaMaxY = 2000m
**.constraintAreaMaxZ = 2000m
*.Slaves[*].wlan[*].radio.transmitter.power = 100mW
*.Attacker[*].wlan[*].radio.transmitter.power = 200mW
*.Attacker[*].**.vector-recording = false
*.Slaves[*].mobility.speed = 16mps
Hi, I did a DoS attack using PingApp. I think you can refer to the source code of PingApp. The important part is to set the sendInterval to 0.0001s. Hope this helps!
