AttributeError: 'Series' object has no attribute 'lower'? - k-fold

"this is a code, folds are created but problem is with fit function"
"this is a code, folds are created but problem is with fit function"
"this is a code, folds are created but problem is with fit function"
data = pd.read_csv('Augsynonym.csv')
print(data)
txt = data['Text']
sent = data['Sentiment']
kf = KFold(n_splits=5)
model = LogisticRegression(solver='liblinear')
vectorizer = CountVectorizer()
acc_score = []
Xtrain = []
xtest = []
for train_set, test_set in kf.split(txt):
    print(train_set, len(train_set))
    print(test_set, len(test_set))
    X_train, X_test = txt.iloc[train_set], txt.iloc[test_set]
    y_train, y_test = sent[train_set], sent[test_set]
    Xtrain.append(X_train)
    xtest.append(X_test)
    xtrain = vectorizer.fit_transform(Xtrain)
    testx = vectorizer.fit_transform(xtest)
    model.fit(xtrain, y_train)
    pred_values = model.predict(testx)
    acc = accuracy_score(pred_values, y_test)
    acc_score.append(acc)
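The AttributeError happens because Xtrain and xtest are lists of whole pandas Series, so CountVectorizer receives Series objects instead of strings and fails when it calls .lower() on them. A minimal sketch of one way to restructure the loop, assuming the Text column holds raw strings (this is a suggestion, not code from the original post):

for train_set, test_set in kf.split(txt):
    X_train, X_test = txt.iloc[train_set], txt.iloc[test_set]
    y_train, y_test = sent.iloc[train_set], sent.iloc[test_set]

    vectorizer = CountVectorizer()
    xtrain = vectorizer.fit_transform(X_train)   # learn the vocabulary on the training fold only
    testx = vectorizer.transform(X_test)         # reuse that vocabulary for the test fold

    model.fit(xtrain, y_train)
    pred_values = model.predict(testx)
    acc_score.append(accuracy_score(y_test, pred_values))

print(sum(acc_score) / len(acc_score))   # mean accuracy over the folds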

Related

User warning when I use more than one GPU with the Trainer function

I am doing text classification, and for training the model I am using the Trainer function from HuggingFace; the code is:
def get_model(name_model):
    model = AutoModelForSequenceClassification.from_pretrained(
        name_model,
        num_labels=2,
        problem_type="single_label_classification"
    )
    return model

model = get_model(name_model)

training_args = TrainingArguments(
    learning_rate=3e-5,
    max_grad_norm=1.0,
    #weight_decay = 0.01,
    num_train_epochs=3,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=1,
    logging_steps=300,
    output_dir="./training_output",
    overwrite_output_dir=True,
    seed=42,
    fp16=True,
    remove_unused_columns=False
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train
)

trainer.args._n_gpu = 2
So, when it finishes training the model (which is a BERT model), it prints a warning.
I am afraid that the model is not correctly trained and that the predictions it makes are not okay.
Do you know how to fix this? With only one GPU there are no warnings.
I tried setting fp16=True because I read in another forum that it could help, and I tried setting is_model_parallel=True, but that didn't fix it. I also tried setting place_model_on_device=True, but that did not work either.
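As a hedged suggestion that is not part of the original post: rather than overriding trainer.args._n_gpu by hand, a common pattern is to control which GPUs are visible before building the Trainer and let it handle device placement itself (with several visible GPUs it wraps the model in torch.nn.DataParallel; alternatively the script can be launched with torchrun for DistributedDataParallel). A minimal sketch:

import os

# Expose both GPUs before torch initializes CUDA (assumption: the machine has two GPUs).
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

from transformers import Trainer, TrainingArguments

# ... build model and training_args exactly as in the snippet above ...
trainer = Trainer(model=model, args=training_args, train_dataset=train)
trainer.train()   # Trainer detects the visible GPUs; no need to set trainer.args._n_gpu manually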

How to speed up DeepFace or use multiple cores?

I am using the DeepFace library to analyze pictures and predict race and gender for a social studies project. I am running it on my university grid with 250 GB of working memory, but it is still taking too long to analyze one picture, currently about 1.2 seconds. Any ideas are appreciated. Here is the code I am using at the moment.
# This is the code I am using. The input is a folder containing three pictures of each person.
def ensemble_method(folder):
    try:
        obj = DeepFace.analyze(img_path=folder[0], actions=['gender', 'race'])
    except ValueError:
        obj = {'gender': np.NaN, 'dominant_race': np.NaN}
    try:
        obj_2 = DeepFace.analyze(img_path=folder[1], actions=['gender', 'race'])
    except ValueError:
        obj_2 = {'gender': np.NaN, 'dominant_race': np.NaN}
    try:
        obj_3 = DeepFace.analyze(img_path=folder[2], actions=['gender', 'race'])
    except ValueError:
        obj_3 = {'gender': np.NaN, 'dominant_race': np.NaN}
    index_1 = folder[0].rfind('/')
    image1 = folder[0][index_1+1:-4]
    index_2 = folder[1].rfind('/')
    image2 = folder[1][index_2+1:-4]
    index_3 = folder[2].rfind('/')
    image3 = folder[2][index_3+1:-4]
    result_1 = {'img_path': image1, 'gender': obj['gender'], 'dominant_race': obj["dominant_race"]}
    result_2 = {'img_path': image2, 'gender': obj_2['gender'], 'dominant_race': obj_2["dominant_race"]}
    result_3 = {'img_path': image3, 'gender': obj_3['gender'], 'dominant_race': obj_3['dominant_race']}
    results = pd.DataFrame([result_1, result_2, result_3])
    print(results)
    name = results.iloc[0, 0][:-2].replace('_', ' ')
    if results['gender'].isna().all():
        final_result = {'name': name, 'gender': np.NaN, 'race': np.NaN}
    else:
        final_result = {'name': name, 'gender': results['gender'].mode()[0], 'race': results['dominant_race'].mode()[0]}
    return final_result
I tried to find out whether DeepFace can use multiple CPUs but did not see how to do it.
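Since each call to ensemble_method is independent, one hedged workaround (my own sketch, not from the original post) is to spread the folders over several worker processes with multiprocessing; each worker loads its own copy of the models, so memory use grows with the number of workers, and whether this actually speeds things up depends on how DeepFace/TensorFlow behave in forked processes:

from multiprocessing import Pool

def analyze_all(folders, n_workers=8):
    # folders is assumed to be a list of 3-element lists of image paths,
    # matching the input expected by ensemble_method above.
    with Pool(processes=n_workers) as pool:
        return pool.map(ensemble_method, folders)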

Properly evaluate a test dataset

I trained a machine translation model using the HuggingFace library:
def compute_metrics(eval_preds):
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    # Replace -100 in the labels as we can't decode them.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    # Some simple post-processing
    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
    result = metric.compute(predictions=decoded_preds, references=decoded_labels)
    result = {"bleu": result["score"]}
    prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
    result["gen_len"] = np.mean(prediction_lens)
    result = {k: round(v, 4) for k, v in result.items()}
    return result

trainer = Seq2SeqTrainer(
    model,
    args,
    train_dataset=tokenized_datasets['train'],
    eval_dataset=tokenized_datasets['test'],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)

trainer.train()

model_dir = './models/'
trainer.save_model(model_dir)
The code above is taken from this Google Colab notebook. After the training, I can see the trained model saved to the folder models and the metric calculated. Now I want to load the trained model and make predictions on a new dataset; here is what I tried:
dataset = load_dataset('csv', data_files='data/training_data.csv')
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
# Tokenize the test dataset
tokenized_datasets = train_test.map(preprocess_function_v2, batched=True)
test_dataset = tokenized_datasets['test']
model = AutoModelForSeq2SeqLM.from_pretrained('models')
model(test_dataset)
It threw the following error:
*** AttributeError: 'Dataset' object has no attribute 'size'
I tried the evaluate() function as well, but it said:
*** torch.nn.modules.module.ModuleAttributeError: 'MarianMTModel' object has no attribute 'evaluate'
And calling eval() on the model only prints its configuration.
What is the proper way to evaluate the performance of the trained model on a new dataset?
It turned out that predictions can be produced using the following code:
inputs = tokenizer(
    questions,
    max_length=max_input_length,
    truncation=True,
    return_tensors='pt',
    padding=True).to('cuda')
translation = model.generate(**inputs)
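To get readable text out of model.generate, the generated ids still have to be decoded; they can then be scored with the same metric used in compute_metrics. A small sketch, assuming tokenizer, model, and metric are the objects above and reference_texts is a hypothetical list of reference translations:

decoded_preds = tokenizer.batch_decode(translation, skip_special_tokens=True)

# sacreBLEU-style metrics expect each reference wrapped in a list.
references = [[ref] for ref in reference_texts]
bleu = metric.compute(predictions=decoded_preds, references=references)
print(bleu["score"])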

XAI for a custom transformer model using AllenNLP

I am solving an NER problem on a Vietnamese dataset with 15 tags in IO format. I have been trying to use the AllenNLP Interpret toolkit with my model, but I cannot configure it completely.
I use the pre-trained language model "xlm-roberta-base" from HuggingFace. I concatenate the last 4 BERT layers and pass them through a linear layer. You can see the model architecture in the source below.
class BaseBertSoftmax(nn.Module):
    def __init__(self, model, drop_out, num_labels):
        super(BaseBertSoftmax, self).__init__()
        self.num_labels = num_labels
        self.model = model
        self.dropout = nn.Dropout(drop_out)
        self.classifier = nn.Linear(4*768, num_labels)  # concatenation of the last 4 layers

    def forward_custom(self, input_ids, attention_mask=None,
                       labels=None, head_mask=None):
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)
        sequence_output = torch.cat((outputs[1][-1], outputs[1][-2], outputs[1][-3], outputs[1][-4]), -1)
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)  # bsz, seq_len, num_labels
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss(ignore_index=0)
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # scores, (hidden_states), (attentions)
What steps do I have to take to integrate this model with AllenNLP Interpret?
Could you please help me with this problem?
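A full AllenNLP Interpret integration requires wrapping the model in an AllenNLP Model and Predictor, which is more than a short snippet. As a hedged starting point that does not use the AllenNLP API at all, here is a plain PyTorch sketch of gradient-based token saliency for this wrapper (the function name and arguments are hypothetical):

import torch

def token_saliency(wrapper, input_ids, attention_mask, token_pos, label_id):
    # wrapper is assumed to be a BaseBertSoftmax instance in eval mode;
    # input_ids and attention_mask are (1, seq_len) tensors from the XLM-R tokenizer.
    captured = {}

    def hook(module, inputs, output):
        output.retain_grad()          # keep gradients on the embedding output
        captured["emb"] = output

    handle = wrapper.model.get_input_embeddings().register_forward_hook(hook)
    logits = wrapper.forward_custom(input_ids, attention_mask)[0]   # (1, seq_len, num_labels)
    handle.remove()

    logits[0, token_pos, label_id].backward()
    grads = captured["emb"].grad[0]   # (seq_len, hidden)
    return grads.norm(dim=-1)         # one saliency score per token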

TensorFlow 2.0 image augmentation using tf.keras.preprocessing.image.ImageDataGenerator and tf.data: model.fit() is running infinitely

I am facing an issue while running the fit() function in TensorFlow with augmented images (using ImageDataGenerator) passed as a dataset. The fit() function runs infinitely without stopping.
I tried it with the default code shared in the TensorFlow documentation.
Please find the code snippet below:
train_data_generator = ImageDataGenerator(
    rotation_range=20,
    shear_range=0.5,
    zoom_range=0.4,
    rescale=1./255,
    vertical_flip=True,
    validation_split=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)

test_data_generator = ImageDataGenerator(rescale=1./255)

ftrain_generator = train_data_generator.flow(
    X_train,
    y_train,
    batch_size=batch_size,
    shuffle=True)

ftrain_generator_ds = tf.data.Dataset.from_generator(
    lambda: ftrain_generator,
    output_types=(tf.float32, tf.float32),
    output_shapes=([batch_size, img_rows, img_cols, num_channel], [batch_size, num_classes]))

ftest_generator = test_data_generator.flow(
    X_test,
    y_test,
    batch_size=batch_size,
    shuffle=False)

ftest_generator_ds = tf.data.Dataset.from_generator(
    lambda: ftest_generator,
    output_types=(tf.float32, tf.float32),
    output_shapes=([batch_size, img_rows, img_cols, num_channel], [batch_size, num_classes]))

ftrain_generator_ds = ftrain_generator_ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
ftest_generator_ds = ftest_generator_ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

model2.fit(ftrain_generator, epochs=num_epoch, validation_data=ftest_generator)
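Note that the last line passes ftrain_generator rather than the prepared ftrain_generator_ds, and datasets built with tf.data.Dataset.from_generator loop forever, so fit() has no natural end of epoch unless it is told how many batches one epoch contains. A hedged sketch of one common fix (a suggestion, not from the original post) is to pass the wrapped datasets together with explicit step counts:

# Tell fit() how many batches make up one epoch, because the generators loop indefinitely.
steps_per_epoch = len(X_train) // batch_size
validation_steps = len(X_test) // batch_size

model2.fit(
    ftrain_generator_ds,
    epochs=num_epoch,
    steps_per_epoch=steps_per_epoch,
    validation_data=ftest_generator_ds,
    validation_steps=validation_steps)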
