Why does PyTorch training run much slower on CUDA than on CPU? - performance

I guess I have made a mistake somewhere in the following simple neural network in PyTorch, because it runs much slower with CUDA than on the CPU. Can you find the mistake, please? Using a function like

def backward(ctx, input):
    return backward_sigm(ctx, input)

seems to have no real impact on performance.
import torch
import torch.nn as nn
import torch.nn.functional as f

dname = 'cuda:0'
dname = 'cpu'

device = torch.device(dname)
print(torch.version.cuda)

def forward_sigm(ctx, input):
    sigm = 1 / (1 + torch.exp(-input))
    ctx.save_for_backward(sigm)
    return sigm

def forward_step(ctx, input):
    return torch.tensor(input > 0.5, dtype=torch.float32, device=device)

def backward_sigm(ctx, grad_output):
    sigm, = ctx.saved_tensors
    return grad_output * sigm * (1 - sigm)

def backward_step(ctx, grad_output):
    return grad_output

class StepAF(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        return forward_sigm(ctx, input)

    @staticmethod
    def backward(ctx, input):
        return backward_sigm(ctx, input)
        #else return grad_output

class StepNN(torch.nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(StepNN, self).__init__()
        self.linear1 = torch.nn.Linear(input_size, hidden_size)
        #self.linear1.cuda()
        self.linear2 = torch.nn.Linear(hidden_size, output_size)
        #self.linear2.cuda()
        #self.StepAF = StepAF.apply

    def forward(self, x):
        h_line_1 = self.linear1(x)
        h_thrash_1 = StepAF.apply(h_line_1)
        h_line_2 = self.linear2(h_thrash_1)
        output = StepAF.apply(h_line_2)
        return output

inputs = torch.tensor([[1,0,1,0],[1,0,0,1],[0,1,0,1],[0,1,1,0],[1,0,0,0],[0,0,0,1],[1,1,0,1],[0,1,0,0],], dtype=torch.float32, device=device)
expected = torch.tensor([[1,0,0],[1,0,0],[0,1,0],[0,1,0],[1,0,0],[0,0,1],[0,1,0],[0,0,1],], dtype=torch.float32, device=device)

# Move the model to the selected device so its weights match the input tensors.
nn = StepNN(4, 8, 3).to(device)
#print(*(x for x in nn.parameters()))

criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(nn.parameters(), lr=1e-3)

steps = 50000
print_steps = steps // 20
good_loss = 1e-5

for t in range(steps):
    output = nn(inputs)
    loss = criterion(output, expected)
    if t % print_steps == 0:
        print('step ', t, ', loss :', loss.item())
    if loss < good_loss:
        print('step ', t, ', loss :', loss.item())
        break
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

test = torch.tensor([[0,1,0,1],[0,1,1,0],[1,0,1,0],[1,1,0,1],], dtype=torch.float32, device=device)
print(nn(test))

Unless you have large enough data, you won't see any performance improvement from using the GPU. The problem is that GPUs rely on massively parallel processing, so unless you hand them a large amount of work at once, the CPU can process the samples just as quickly. On top of that, every CUDA kernel launch and host-to-device transfer carries a fixed overhead, and with tensors this small that overhead dominates, which is why the GPU ends up slower than the CPU.
As far as I can see in your example, you are using 8 samples, each with 4 features. I would imagine that only once you have hundreds or thousands of samples would you start to see a performance improvement on a GPU. In your case, the input size is 4 and the hidden layer size is 8, so the CPU can perform the calculations very quickly.
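As a rough illustration (a sketch of mine, not code from the question), you can time one training step at different batch sizes and watch the GPU only pull ahead once the batch is large; the network shapes and repeat count below are arbitrary:

import time
import torch

def time_step(device, batch_size, in_features=4, hidden=8, out_features=3, repeats=100):
    # Tiny MLP roughly matching the shapes in the question.
    model = torch.nn.Sequential(
        torch.nn.Linear(in_features, hidden),
        torch.nn.Sigmoid(),
        torch.nn.Linear(hidden, out_features),
    ).to(device)
    x = torch.randn(batch_size, in_features, device=device)
    y = torch.randn(batch_size, out_features, device=device)
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    if device.type == 'cuda':
        torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(repeats):
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
    if device.type == 'cuda':
        torch.cuda.synchronize()
    return (time.perf_counter() - start) / repeats

for batch_size in (8, 100000):
    for name in ('cpu', 'cuda:0'):
        if name.startswith('cuda') and not torch.cuda.is_available():
            continue
        print(name, batch_size, time_step(torch.device(name), batch_size))

The torch.cuda.synchronize() calls matter here because CUDA kernels launch asynchronously; without them the timer would stop before the GPU has actually finished its work.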
There are lots of example notebooks online where people use the MNIST data set (it has around 60,000 training images), so you could load one in, say, Google Colab, train on the CPU and then on the GPU, and compare the training times. You could try this link, for example. It uses TensorFlow instead of PyTorch, but it will give you an idea of the performance improvement a GPU brings.
Note: if you haven't used Google Colab before, you need to change the runtime type (None for CPU and GPU for GPU) in the Runtime menu at the top.
Also, I will post the results from that notebook here (look at the times mentioned in brackets; if you run it yourself, you can see firsthand how fast it runs):
On CPU :
INFO:tensorflow:loss = 294.3736, step = 1
INFO:tensorflow:loss = 28.285727, step = 101 (23.769 sec)
INFO:tensorflow:loss = 23.518856, step = 201 (24.128 sec)
On GPU :
INFO:tensorflow:loss = 295.08328, step = 0
INFO:tensorflow:loss = 47.37291, step = 100 (4.709 sec)
INFO:tensorflow:loss = 23.31364, step = 200 (4.581 sec)
INFO:tensorflow:loss = 9.980572, step = 300 (4.572 sec)
INFO:tensorflow:loss = 17.769928, step = 400 (4.560 sec)
INFO:tensorflow:loss = 16.345463, step = 500 (4.531 sec)

Related

Running Out of RAM using FilePerUserClientData

I have a problem with training using tff.simulation.FilePerUserClientData - I am quickly running out of RAM after 5-6 rounds with 10 clients per round.
The RAM usage is steadily increasing with each round.
I tried to narrow it down and realized that the issue is not the actual iterative process but the creation of the client datasets.
Simply calling create_tf_dataset_for_client(client) in a loop causes the problem.
So this is a minimal version of my code:
import tensorflow as tf
import tensorflow_federated as tff
import numpy as np
import pickle

BATCH_SIZE = 16
EPOCHS = 2
MAX_SEQUENCE_LEN = 20
NUM_ROUNDS = 100
CLIENTS_PER_ROUND = 10

def decode_fn(record_bytes):
    return tf.io.parse_single_example(
        record_bytes,
        {"x": tf.io.FixedLenFeature([MAX_SEQUENCE_LEN], dtype=tf.string),
         "y": tf.io.FixedLenFeature([MAX_SEQUENCE_LEN], dtype=tf.string)}
    )

def dataset_fn(path):
    return tf.data.TFRecordDataset([path]).map(decode_fn).padded_batch(BATCH_SIZE).repeat(EPOCHS)

def sample_client_data(data, client_ids, sampling_prob):
    clients_total = len(client_ids)
    x = np.random.uniform(size=clients_total)
    sampled_ids = [client_ids[i] for i in range(clients_total) if x[i] < sampling_prob]
    return [data.create_tf_dataset_for_client(client) for client in sampled_ids]

with open('users.pkl', 'rb') as f:
    users = pickle.load(f)

train_client_ids = users["train"]
client_id_to_train_file = {i: "reddit_leaf_tf/" + i for i in train_client_ids}

train_data = tff.simulation.datasets.FilePerUserClientData(
    client_ids_to_files=client_id_to_train_file,
    dataset_fn=dataset_fn
)

sampling_prob = CLIENTS_PER_ROUND / len(train_client_ids)

for round_num in range(0, NUM_ROUNDS):
    print('Round {r}'.format(r=round_num))
    participants_data = sample_client_data(train_data, train_client_ids, sampling_prob)
    print("Round Completed")
I am using tensorflow-federated 0.19.0.
Is there something wrong with the way I create the client datasets, or is it somehow expected that the RAM from the previous round is not freed?
As schmana noticed, this occurs when the cardinality of the CLIENTS placement changes (a different number of client datasets) each round. This results in a cache filling up, as documented in http://github.com/tensorflow/federated/issues/1215.
A workaround in the immediate term would be to call
tff.framework.get_context_stack().current.executor_factory.clean_up_executors()
at the start or end of every round.
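Applied to the loop from the question, the workaround would look roughly like this (a sketch; the actual federated training step is elided because it is not part of the minimal example):

for round_num in range(NUM_ROUNDS):
    print('Round {r}'.format(r=round_num))
    participants_data = sample_client_data(train_data, train_client_ids, sampling_prob)
    # ... run one round of the iterative process on participants_data here ...
    # Release the executors (and their cached client datasets) before the next round.
    tff.framework.get_context_stack().current.executor_factory.clean_up_executors()
    print("Round Completed")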

Statsmodels - Poisson GLM is taking more than an hour to fit the model - does not return anything

I am trying to fit a Poisson GLM with statsmodels on my training data set (about 30,000 rows/observations) in a Jupyter Notebook with the following code, but it has been running for more than an hour without returning anything. I tried shutting it down and starting it again, and I have run it on two different laptops (one with 8 GB of RAM, one with 16 GB), but nothing works...
Can somebody please give me some suggestions on how to fix it?
Code:
import numpy as np
import pandas as pd
import statsmodels.api as sm
from patsy import dmatrices

df = pd.read_csv('gravity.csv', sep=';')

mask = np.random.rand(len(df)) < 0.8
df_train = df[mask]
df_test = df[~mask]
print('Training data set length=' + str(len(df_train)))
print('Testing data set length=' + str(len(df_test)))

expr = """BTI ~ GDP_mult + distwces + DAI_mult + year + contig + comlang_off + colony"""
y_train, X_train = dmatrices(expr, df_train, return_type='dataframe')
y_test, X_test = dmatrices(expr, df_test, return_type='dataframe')

poisson_training_results = sm.GLM(y_train, X_train, family=sm.families.Poisson()).fit()

How to use BertForSequenceClassification for token max_length set at 1700?

I want to perform author classification on the Reuters 50 50 dataset, where the max token length is 1600+ tokens and there are 50 classes/authors in total.
With max_length=1700 and batch_size=1, I'm getting RuntimeError: CUDA out of memory. This error can be prevented by setting max_length=512, but this has the unwanted effect of truncating the texts.
Tokenizing and encoding:
import torch
from keras.preprocessing.sequence import pad_sequences
from transformers import BertTokenizer

# Assumed tokenizer setup (not shown in the original snippet).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

MAX_LEN = 1700

def get_encodings(texts):
    token_ids = []
    for text in texts:
        token_id = tokenizer.encode(text, add_special_tokens=True, max_length=MAX_LEN)
        token_ids.append(token_id)
    return token_ids

def pad_encodings(encodings):
    return pad_sequences(encodings, maxlen=MAX_LEN, dtype="long",
                         value=0, truncating="post", padding="post")

def get_attention_masks(padded_encodings):
    attention_masks = []
    for encoding in padded_encodings:
        attention_mask = [int(token_id > 0) for token_id in encoding]
        attention_masks.append(attention_mask)
    return attention_masks

train_encodings = get_encodings(train_df.text.values)
train_encodings = pad_encodings(train_encodings)
train_attention_masks = get_attention_masks(train_encodings)

test_encodings = get_encodings(test_df.text.values)
test_encodings = pad_encodings(test_encodings)
test_attention_masks = get_attention_masks(test_encodings)
Packing into Dataset and Dataloader:
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler

X_train = torch.tensor(train_encodings)
y_train = torch.tensor(train_df.author_id.values)
train_masks = torch.tensor(train_attention_masks)

X_test = torch.tensor(test_encodings)
y_test = torch.tensor(test_df.author_id.values)
test_masks = torch.tensor(test_attention_masks)

batch_size = 1

# Create the DataLoader for our training set.
train_data = TensorDataset(X_train, train_masks, y_train)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)

validation_data = TensorDataset(X_test, test_masks, y_test)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
Model setup:
from transformers import BertConfig, BertForSequenceClassification, AdamW

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

config = BertConfig.from_pretrained(
    'bert-base-uncased',
    num_labels = 50,
    output_attentions = False,
    output_hidden_states = False,
    max_position_embeddings = MAX_LEN
)

model = BertForSequenceClassification(config)
model.to(device)

optimizer = AdamW(model.parameters(),
                  lr = 2e-5,
                  eps = 1e-8
                  )
Training:
for epoch_i in range(0, epochs):
    model.train()
    for step, batch in enumerate(train_dataloader):
        b_texts = batch[0].to(device)
        b_attention_masks = batch[1].to(device)
        b_authors = batch[2].to(device)

        model.zero_grad()
        outputs = model(b_texts,
                        token_type_ids=None,
                        attention_mask=b_attention_masks,
                        labels=b_authors)   # <------- ERROR HERE
Error:
RuntimeError: CUDA out of memory. Tried to allocate 6.00 GiB (GPU 0; 7.93 GiB total capacity; 1.96 GiB already allocated; 5.43 GiB free; 536.50 KiB cached)
Unless you are training on a TPU, your chances of ever having enough GPU RAM for sequences of that length are extremely low with any of the GPUs available right now.
For some BERT models, the model alone takes well above 10 GB of GPU RAM, and doubling the sequence length beyond 512 tokens takes roughly that much more memory again. For reference, a Titan RTX with 24 GB of GPU RAM (about the most currently available on a single GPU) can barely fit 24 samples of 512 tokens at a time.
Fortunately, most of these networks still yield very decent performance when the samples are truncated, but this is of course task-specific. Also keep in mind that, unless you are training from scratch, all of the pre-trained models are generally trained on a 512-token limit. To my knowledge, the only model currently supporting longer sequences is Bart, which allows up to 1024 tokens.
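If you do go the truncation route, a minimal sketch (assuming the Hugging Face transformers tokenizer API; the text string below is a placeholder) is to cap max_length at 512 when encoding, which keeps you within the position embeddings of the pretrained checkpoint:

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Truncate each text to the 512-token limit the pretrained weights were trained with.
token_ids = tokenizer.encode(
    "some very long document ...",
    add_special_tokens=True,
    max_length=512,
    truncation=True,
)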

Tensorflow dataset API slow to get data on large batch size

I find that getting one batch from the TensorFlow Dataset API can be very slow when the batch size is large, even when all the data is in memory. Here is one example. Does anyone have any insight?
import tensorflow as tf

FEATURE_NUM = 500

tf_X = tf.placeholder(dtype=tf.float32, shape=[None, FEATURE_NUM], name="X")
tf_Y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name="Y")

batch_size = 1000000
dataset = tf.data.Dataset.from_tensor_slices((tf_X, tf_Y)).batch(batch_size)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()

se = tf.Session()
se.run(tf.global_variables_initializer())
se.run(iterator.initializer, feed_dict={tf_X: numpy_array_X, tf_Y: numpy_array_Y})

while True:
    data = se.run(next_element)  # This takes more than 5 seconds per call

How to measure execution time for prediction per image (keras)

I have a simple model created with Keras and I need to measure the execution time for prediction per image. Right now I just do this:
start = time.clock()
my_model.predict(images_test)
end = time.clock()
print("Time per image: {} ".format((end-start)/len(images_test)))
But I noticed that the calculated time is bigger when len(images_test) is smaller. For example, with len(images_test) = 32 I get 0.06, and with len(images_test) = 1024 I get 0.006.
Is there a "right" way to do this?
With TF/Keras there does not seem to be an asynchronicity problem, but with PyTorch there is: GPU operations run asynchronously, so you have to synchronize before reading the clock.
In TF:
start = time.clock()
result = my_model.predict(images_test)
end = time.clock()
In PyTorch:
torch.cuda.synchronize()
start = time.clock()
my_model.predict(images_test)
torch.cuda.synchronize()
end = time.clock()
But I also think you should run the prediction in a loop of, say, 10 iterations and print the resulting list of times (the computer needs to load the Keras model, so the first call is slower than the later ones).
In TF:
pred_time_list = []
for i in range(10):
    start = time.clock()
    result = my_model.predict(images_test)
    end = time.clock()
    pred_time_list.append(end - start)

print(pred_time_list)
(Print pred_time_list and you may find out why the measured times looked wrong.)
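For completeness, here is a small sketch of the same idea using a warm-up call and time.perf_counter (time.clock was removed in Python 3.8); my_model and images_test are assumed to be the objects from the question:

import time

# Warm-up: the first call includes model loading / graph compilation, so discard it.
my_model.predict(images_test)

runs = 10
times = []
for _ in range(runs):
    start = time.perf_counter()
    my_model.predict(images_test)
    times.append(time.perf_counter() - start)

# Report the best run per image to reduce the influence of background noise.
print("Time per image:", min(times) / len(images_test))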
References:
[1] https://discuss.pytorch.org/t/doing-qr-decomposition-on-gpu-is-much-slower-than-on-cpu/21213/6
[2] https://discuss.pytorch.org/t/is-there-any-code-torch-backends-cudnn-benchmark-torch-cuda-synchronize-similar-in-tensorflow/51484/2
