Using a Frozen-Parameter BERT for Sentiment Classification¶
In this example, we will construct a text classifier with a parameter-frozen BERT, and train it on the IMDB sentiment classification dataset
Dataset: IMDB Sentiment¶
This is a binary classification dataset; you can download our processed dataset from here:
- https://webank-ai-1251170195.cos.ap-guangzhou.myqcloud.com/fate/examples/data/IMDB.csv and place it in the examples/data folder.
The original data is from:
Check dataset¶
In [9]:
Copied!
# Quick look at the processed IMDB dataset (columns: id, text, label).
import pandas as pd
df = pd.read_csv('../../../../examples/data/IMDB.csv')
import pandas as pd
df = pd.read_csv('../../../../examples/data/IMDB.csv')
In [10]:
Copied!
df
df
Out[10]:
id | text | label | |
---|---|---|---|
0 | 0 | One of the other reviewers has mentioned that ... | 1 |
1 | 1 | A wonderful little production. <br /><br />The... | 1 |
2 | 2 | I thought this was a wonderful way to spend ti... | 1 |
3 | 3 | Basically there's a family where a little boy ... | 0 |
4 | 4 | Petter Mattei's "Love in the Time of Money" is... | 1 |
... | ... | ... | ... |
1996 | 1996 | THE CELL (2000) Rating: 8/10<br /><br />The Ce... | 1 |
1997 | 1997 | This movie, despite its list of B, C, and D li... | 0 |
1998 | 1998 | I loved this movie! It was all I could do not ... | 1 |
1999 | 1999 | This was the worst movie I have ever seen Bill... | 0 |
2000 | 2000 | Stranded in Space (1972) MST3K version - a ver... | 0 |
2001 rows × 3 columns
In [11]:
Copied!
from federatedml.nn.dataset.nlp_tokenizer import TokenizerDataset
from federatedml.nn.dataset.nlp_tokenizer import TokenizerDataset
2022-12-25 23:19:45.537897: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory 2022-12-25 23:19:45.537936: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
In [13]:
Copied!
# Build the dataset; TokenizerDataset presumably tokenizes the 'text'
# column with the bert-base-uncased tokenizer on load — TODO confirm
# against federatedml.nn.dataset.nlp_tokenizer.
ds = TokenizerDataset(tokenizer_name_or_path="bert-base-uncased")
ds.load('../../../../examples/data/IMDB.csv')
ds = TokenizerDataset(tokenizer_name_or_path="bert-base-uncased")
ds.load('../../../../examples/data/IMDB.csv')
In [14]:
Copied!
# Sanity check: the dataset plugs into a standard PyTorch DataLoader.
# (NOTE(review): the notebook export stripped the loop-body indentation
# of the `break` lines below.)
from torch.utils.data import DataLoader
dl = DataLoader(ds, batch_size=16)
for i in dl:
break
from torch.utils.data import DataLoader
dl = DataLoader(ds, batch_size=16)
for i in dl:
break
Build A Bert Classifier¶
In [15]:
Copied!
from pipeline.component.nn import save_to_fate
from pipeline.component.nn import save_to_fate
In [19]:
Copied!
%%save_to_fate model bert_.py
import torch as t
from federatedml.nn.model_zoo.pretrained_bert import PretrainedBert
class BertClassifier(t.nn.Module):
def __init__(self, ):
super(BertClassifier, self).__init__()
self.bert = PretrainedBert(pretrained_model_name_or_path='bert-base-uncased', freeze_weight=True)
self.classifier = t.nn.Sequential(
t.nn.Linear(768, 128),
t.nn.ReLU(),
t.nn.Linear(128, 64),
t.nn.ReLU(),
t.nn.Linear(64, 1),
t.nn.Sigmoid()
)
def parameters(self, ):
return self.classifier.parameters()
def forward(self, x):
x = self.bert(x)
return self.classifier(x.pooler_output)
%%save_to_fate model bert_.py
import torch as t
from federatedml.nn.model_zoo.pretrained_bert import PretrainedBert
class BertClassifier(t.nn.Module):
def __init__(self, ):
super(BertClassifier, self).__init__()
self.bert = PretrainedBert(pretrained_model_name_or_path='bert-base-uncased', freeze_weight=True)
self.classifier = t.nn.Sequential(
t.nn.Linear(768, 128),
t.nn.ReLU(),
t.nn.Linear(128, 64),
t.nn.ReLU(),
t.nn.Linear(64, 1),
t.nn.Sigmoid()
)
def parameters(self, ):
return self.classifier.parameters()
def forward(self, x):
x = self.bert(x)
return self.classifier(x.pooler_output)
In [1]:
Copied!
# Instantiate the classifier (presumably downloads/caches bert-base-uncased
# on first use — depends on PretrainedBert's behavior).
model = BertClassifier()
model = BertClassifier()
In [21]:
Copied!
# Local smoke test of the FedAVG trainer before submitting a federated job.
import torch as t
from federatedml.nn.homo.trainer.fedavg_trainer import FedAVGTrainer
trainer = FedAVGTrainer(epochs=3, batch_size=16, shuffle=True, data_loader_worker=4)
trainer.local_mode()  # run standalone: no parties, no aggregation
trainer.set_model(model)
import torch as t
from federatedml.nn.homo.trainer.fedavg_trainer import FedAVGTrainer
trainer = FedAVGTrainer(epochs=3, batch_size=16, shuffle=True, data_loader_worker=4)
trainer.local_mode()
trainer.set_model(model)
In [22]:
Copied!
# Adam only sees the classifier head: BertClassifier.parameters() returns
# just the head's parameters, so the frozen BERT is never updated.
opt = t.optim.Adam(model.parameters(), lr=0.005)
loss = t.nn.BCELoss()  # binary cross-entropy, matches the Sigmoid output
# local test
trainer.train(ds, None, opt, loss)
opt = t.optim.Adam(model.parameters(), lr=0.005)
loss = t.nn.BCELoss()
# local test
trainer.train(ds, None, opt, loss)
epoch is 0 100%|██████████| 126/126 [01:21<00:00, 1.55it/s] epoch loss is 0.6995822169195706 epoch is 1 100%|██████████| 126/126 [01:17<00:00, 1.63it/s] epoch loss is 0.6738948538445163 epoch is 2 100%|██████████| 126/126 [01:16<00:00, 1.64it/s] epoch loss is 0.6501996349180299
Submit a pipeline¶
In [28]:
Copied!
# Build the FATE pipeline: guest 10000 initiates, host 9999 participates,
# guest also acts as arbiter; then bind the local CSV as a named table.
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.component import HomoNN
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, Evaluation, DataTransform
from pipeline.interface import Data, Model
fate_torch_hook(t)  # presumably patches torch so t.nn/t.optim specs can be serialized into pipeline configs — see FATE docs
import os
fate_project_path = os.path.abspath('../../../../')
guest_0 = 10000
host_1 = 9999
pipeline = PipeLine().set_initiator(role='guest', party_id=guest_0).set_roles(guest=guest_0, host=host_1,
arbiter=guest_0)
data_0 = {"name": "imdb", "namespace": "experiment"}
data_path = fate_project_path + '/examples/data/IMDB.csv'
# NOTE(review): bind_table is called twice with identical arguments —
# the second call looks redundant; confirm whether it is intentional.
pipeline.bind_table(name=data_0['name'], namespace=data_0['namespace'], path=data_path)
pipeline.bind_table(name=data_0['name'], namespace=data_0['namespace'], path=data_path)
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.component import HomoNN
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, Evaluation, DataTransform
from pipeline.interface import Data, Model
fate_torch_hook(t)
import os
fate_project_path = os.path.abspath('../../../../')
guest_0 = 10000
host_1 = 9999
pipeline = PipeLine().set_initiator(role='guest', party_id=guest_0).set_roles(guest=guest_0, host=host_1,
arbiter=guest_0)
data_0 = {"name": "imdb", "namespace": "experiment"}
data_path = fate_project_path + '/examples/data/IMDB.csv'
pipeline.bind_table(name=data_0['name'], namespace=data_0['namespace'], path=data_path)
pipeline.bind_table(name=data_0['name'], namespace=data_0['namespace'], path=data_path)
Out[28]:
{'namespace': 'experiment', 'table_name': 'imdb'}
In [29]:
Copied!
# reader_0 feeds training data, reader_1 feeds validation data; both guest
# and host read the same bound table in this demo.
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest_0).component_param(table=data_0)
reader_0.get_party_instance(role='host', party_id=host_1).component_param(table=data_0)
reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role='guest', party_id=guest_0).component_param(table=data_0)
reader_1.get_party_instance(role='host', party_id=host_1).component_param(table=data_0)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest_0).component_param(table=data_0)
reader_0.get_party_instance(role='host', party_id=host_1).component_param(table=data_0)
reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role='guest', party_id=guest_0).component_param(table=data_0)
reader_1.get_party_instance(role='host', party_id=host_1).component_param(table=data_0)
In [30]:
Copied!
# Configure the HomoNN component: CustModel loads the BertClassifier saved
# earlier via %%save_to_fate (module bert_, class BertClassifier).
from pipeline.component.homo_nn import DatasetParam, TrainerParam
model = t.nn.Sequential(
t.nn.CustModel(module_name='bert_', class_name='BertClassifier')
)
nn_component = HomoNN(name='nn_0',
model=model,
loss=t.nn.BCELoss(),
optimizer = t.optim.Adam(lr=0.001, weight_decay=0.001),
dataset=DatasetParam(dataset_name='nlp_tokenizer', tokenizer_name_or_path="bert-base-uncased"), # use the customized dataset
trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=2, batch_size=16, data_loader_worker=8, cuda=True), # cuda=True requires a GPU on the parties
torch_seed=100
)
from pipeline.component.homo_nn import DatasetParam, TrainerParam
model = t.nn.Sequential(
t.nn.CustModel(module_name='bert_', class_name='BertClassifier')
)
nn_component = HomoNN(name='nn_0',
model=model,
loss=t.nn.BCELoss(),
optimizer = t.optim.Adam(lr=0.001, weight_decay=0.001),
dataset=DatasetParam(dataset_name='nlp_tokenizer', tokenizer_name_or_path="bert-base-uncased"), # use the customized dataset
trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=2, batch_size=16, data_loader_worker=8, cuda=True),
torch_seed=100
)
In [ ]:
Copied!
# Wire components (readers -> nn -> binary evaluation), compile the job
# config, and submit it to the FATE cluster.
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(nn_component, data=Data(train_data=reader_0.output.data, validate_data=reader_1.output.data))
pipeline.add_component(Evaluation(name='eval_0', eval_type='binary'), data=Data(data=nn_component.output.data))
pipeline.compile()
pipeline.fit()
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(nn_component, data=Data(train_data=reader_0.output.data, validate_data=reader_1.output.data))
pipeline.add_component(Evaluation(name='eval_0', eval_type='binary'), data=Data(data=nn_component.output.data))
pipeline.compile()
pipeline.fit()
Last update:
2023-04-07