Adding coherence momentum model #60

Merged · 20 commits · Sep 20, 2022
7 changes: 4 additions & 3 deletions demo_api/README.md
@@ -2,12 +2,13 @@
```
# From root folder of repository:
docker build -t <model_name> -f demo_api/<model_name>/Dockerfile demo_api/

docker run -p 8000:8000 <model_name>

# Example: Production build
docker build -t lsr -f demo_api/lsr/Dockerfile demo_api/
docker run -p 8000:8000 lsr

# Example: Dev build
docker build -t coherence_momentum -f demo_api/coherence_momentum/dev.Dockerfile .
```

## Notes on dev vs prod build
46 changes: 46 additions & 0 deletions demo_api/coherence_momentum/api.py
@@ -0,0 +1,46 @@
from flask import request

from demo_api.common import create_api
from sgnlp.models.coherence_momentum import (
CoherenceMomentumModel,
CoherenceMomentumConfig,
CoherenceMomentumPreprocessor
)

app = create_api(app_name=__name__, model_card_path="model_card/coherence_momentum.json")

# Load processors and models
config = CoherenceMomentumConfig.from_pretrained(
"https://storage.googleapis.com/sgnlp/models/coherence_momentum/config.json"
)
model = CoherenceMomentumModel.from_pretrained(
"https://storage.googleapis.com/sgnlp/models/coherence_momentum/pytorch_model.bin",
config=config
)

preprocessor = CoherenceMomentumPreprocessor(config.model_size, config.max_len)

app.logger.info("Model initialization complete")


@app.route("/predict", methods=["POST"])
def predict():
    req_body = request.get_json()

    text1 = req_body["text1"]
    text2 = req_body["text2"]

    text1_tensor = preprocessor([text1])
    text2_tensor = preprocessor([text2])

    text1_score = model.get_main_score(text1_tensor["tokenized_texts"]).item()
    text2_score = model.get_main_score(text2_tensor["tokenized_texts"]).item()

    return {
        "text1_score": text1_score,
        "text2_score": text2_score
    }


if __name__ == "__main__":
    app.run()
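
For reference, once the demo API container is running (see the README changes above), the /predict endpoint defined in api.py can be exercised with a small client sketch like the one below; the localhost address and example texts are placeholders, not part of this PR:

```
# Hypothetical client for the /predict endpoint above.
# Assumes the image was built and started locally, e.g.:
#   docker run -p 8000:8000 coherence_momentum
import requests

payload = {
    "text1": "First candidate paragraph goes here.",
    "text2": "A reordered version of the same paragraph goes here.",
}
resp = requests.post("http://localhost:8000/predict", json=payload)
scores = resp.json()
# Higher score = judged more coherent by the model.
print(scores["text1_score"], scores["text2_score"])
```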
14 changes: 14 additions & 0 deletions demo_api/coherence_momentum/dev.Dockerfile
@@ -0,0 +1,14 @@
FROM python:3.8-buster

COPY ./demo_api /demo_api
COPY ./sgnlp /sgnlp
COPY ./setup.py /setup.py
COPY ./README.md /README.md

RUN pip install -r /demo_api/coherence_momentum/requirements_dev.txt

WORKDIR /demo_api/coherence_momentum

RUN python -m download_pretrained

CMD PYTHONPATH=../../ gunicorn -c ../gunicorn.conf.py
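
This dev image copies the local sgnlp source into the container, installs it in editable mode via requirements_dev.txt (the `-e .` entry below), pre-downloads the pretrained weights, and then serves the API with gunicorn.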
9 changes: 9 additions & 0 deletions demo_api/coherence_momentum/download_pretrained.py
@@ -0,0 +1,9 @@
from sgnlp.models.coherence_momentum import CoherenceMomentumModel, CoherenceMomentumConfig

config = CoherenceMomentumConfig.from_pretrained(
"https://storage.googleapis.com/sgnlp/models/coherence_momentum/config.json"
)
model = CoherenceMomentumModel.from_pretrained(
"https://storage.googleapis.com/sgnlp/models/coherence_momentum/pytorch_model.bin",
config=config
)
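
This script is run at image build time (the `RUN python -m download_pretrained` step in dev.Dockerfile above), so the pretrained config and weights are already cached inside the image before the API starts serving.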
36 changes: 36 additions & 0 deletions demo_api/coherence_momentum/model_card/coherence_momentum.json
@@ -0,0 +1,36 @@
{
"name": "CoherenceMomentum",
"languages": "English",
"description": "This is a neural network model that makes use of a momentum encoder and hard negative mining during training. This model is able to take in a piece of text and output a coherence score. The coherence score is only meant for comparison, i.e. it is only meaningful when used to compare between two texts, and the text with the higher coherence score is deemed to be more coherent by the model.",
"paper": {
"text": "Jwalapuram, P., Joty, S., & Lin, X. (2022). Rethinking Self-Supervision Objectives for Generalizable Coherence Modeling. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), May 2022 (pp. 6044-6059).",
"url": "https://aclanthology.org/2022.acl-long.418/"
},
"trainingDataset": "Permuted dataset derived from Linguistic Data Consortium's (LDC) Wall Street Journal (WSJ) dataset. Please contact the authors to get the dataset if you have a valid LDC license.",
"evaluationDataset": "Permuted dataset derived from Linguistic Data Consortium's (LDC) Wall Street Journal (WSJ) dataset. Please contact the authors to get the dataset if you have a valid LDC license.",
"evaluationScores": "0.988 accuracy on permuted WSJ dataset. 0.986 accuracy reported by authors on permuted WSJ dataset.",
"trainingConfig": {
"text": "https://storage.googleapis.com/sgnlp/models/coherence_momentum/config.json",
"url": "https://storage.googleapis.com/sgnlp/models/coherence_momentum/config.json"
},
"trainingTime": "~24 hours for ~46000 steps (batch size of 1) on a single A100 GPU",
"modelWeights": {
"text": "https://storage.googleapis.com/sgnlp/models/coherence_momentum/pytorch_model.bin",
"url": "https://storage.googleapis.com/sgnlp/models/coherence_momentum/pytorch_model.bin"
},
"modelInput": "A paragraph of text. During training, each positive example can be paired with one or more negative examples.",
"modelOutput": "Coherence score for the input text.",
"modelSize": "~930MB",
"inferenceInfo": "Not available.",
"usageScenarios": "Essay scoring, summarization, language generation.",
"originalCode": {
"text": "https://github.com/ntunlp/coherence-paradigm",
"url": "https://github.com/ntunlp/coherence-paradigm"
},
"license": {
"text": "MIT License",
"url": "https://choosealicense.com/licenses/mit"
},
"contact": "[email protected]",
"additionalInfo": "Not applicable."
}
3 changes: 3 additions & 0 deletions demo_api/coherence_momentum/requirements_dev.txt
@@ -0,0 +1,3 @@
-e .
flask
gunicorn
33 changes: 33 additions & 0 deletions demo_api/coherence_momentum/usage.py
@@ -0,0 +1,33 @@
from sgnlp.models.coherence_momentum import CoherenceMomentumModel, CoherenceMomentumConfig, \
CoherenceMomentumPreprocessor

config = CoherenceMomentumConfig.from_pretrained(
"https://storage.googleapis.com/sgnlp/models/coherence_momentum/config.json"
)
model = CoherenceMomentumModel.from_pretrained(
"https://storage.googleapis.com/sgnlp/models/coherence_momentum/pytorch_model.bin",
config=config
)

preprocessor = CoherenceMomentumPreprocessor(config.model_size, config.max_len)

text1 = "Companies listed below reported quarterly profit substantially different from the average of analysts ' " \
"estimates . The companies are followed by at least three analysts , and had a minimum five-cent change in " \
"actual earnings per share . Estimated and actual results involving losses are omitted . The percent " \
"difference compares actual profit with the 30-day estimate where at least three analysts have issues " \
"forecasts in the past 30 days . Otherwise , actual profit is compared with the 300-day estimate . " \
"Source : Zacks Investment Research"
text2 = "The companies are followed by at least three analysts , and had a minimum five-cent change in actual " \
"earnings per share . The percent difference compares actual profit with the 30-day estimate where at least " \
"three analysts have issues forecasts in the past 30 days . Otherwise , actual profit is compared with the " \
"300-day estimate . Source : Zacks Investment Research. Companies listed below reported quarterly profit " \
"substantially different from the average of analysts ' estimates . Estimated and actual results involving " \
"losses are omitted ."

text1_tensor = preprocessor([text1])
text2_tensor = preprocessor([text2])

text1_score = model.get_main_score(text1_tensor["tokenized_texts"]).item()
text2_score = model.get_main_score(text2_tensor["tokenized_texts"]).item()

print(text1_score, text2_score)
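
Here text1 is a WSJ paragraph in its original sentence order and text2 contains the same sentences shuffled, so the model is expected to assign text1 the higher score; as the model card notes, the scores are only meaningful relative to each other.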
3 changes: 2 additions & 1 deletion demo_api/lif_3way_ap/model_card/lif_3way_ap.json
@@ -16,7 +16,8 @@
},
"evaluationScores": "0.745 F1 on test_i dataset. 0.75 F1 reported by authors in paper on test_i dataset.",
"trainingConfig": {
"text": "https://storage.googleapis.com/sgnlp/models/lif_3way_ap/config.json"
"text": "https://storage.googleapis.com/sgnlp/models/lif_3way_ap/config.json",
"url": "https://storage.googleapis.com/sgnlp/models/lif_3way_ap/config.json"
},
"trainingTime": "~12 hours for 13 epochs on a single V100 GPU.",
"modelWeights": {
32 changes: 32 additions & 0 deletions jsonnet/demo-api.jsonnet
@@ -16,6 +16,24 @@ local build_and_push_staging(module_name, image_name) = {
],
};

local build_and_push_dev_staging(module_name, image_name) = {
image: "registry.aisingapore.net/sg-nlp/sg-nlp-runner:latest",
stage: "build_and_push_staging",
tags: [
"on-prem",
"dind",
],
when: "manual",
script: [
"echo 'Logging in to AISG Docker Registry...'",
"echo $STG_REGISTRY_PASSWORD | docker login registry.aisingapore.net -u $STG_DOCKER_USER --password-stdin",
"echo 'Building and pushing image...'",
"docker build --no-cache -t %s -f demo_api/%s/dev.Dockerfile ." % [module_name, module_name],
"docker tag %s registry.aisingapore.net/sg-nlp/%s:latest" % [module_name, image_name],
"docker push registry.aisingapore.net/sg-nlp/%s:latest" % image_name,
],
};

local build_and_push_docs_staging() = {
image: "python:3.8.11-slim",
stage: "build_and_push_staging",
@@ -154,6 +172,15 @@ local api_names = {
}
};

// To deploy dev builds into production (for beta public testing)
local dev_api_names = {
"coherence_momentum": {
module_name: "coherence_momentum",
image_name: "coherence-momentum",
deployment_name: "coherence-momentum"
}
};

{
"stages": [
"build_and_push_staging",
@@ -166,6 +193,11 @@ local api_names = {
[api_names[key]["module_name"] + "_build_and_push_staging"]:
build_and_push_staging(api_names[key]["module_name"], api_names[key]["image_name"])
for key in std.objectFields(api_names)
} + {
// Build and push dev staging
[dev_api_names[key]["module_name"] + "_build_and_push_dev_staging"]:
build_and_push_dev_staging(dev_api_names[key]["module_name"], dev_api_names[key]["image_name"])
for key in std.objectFields(dev_api_names)
} + {
// Restart kubernetes staging
[api_names[key]["module_name"] + "_restart_kubernetes_staging"]:
5 changes: 5 additions & 0 deletions jsonnet/dev-demo-api.jsonnet
@@ -59,6 +59,11 @@ local api_names = {
module_name: "ufd",
image_name: "ufd",
deployment_name: "ufd"
},
"coherence_momentum": {
module_name: "coherence_momentum",
image_name: "coherence-momentum",
deployment_name: "coherence-momentum"
}
};

15 changes: 15 additions & 0 deletions polyaxon/coherence_momentum/model-training.Dockerfile
@@ -0,0 +1,15 @@
FROM pytorch/pytorch:1.11.0-cuda11.3-cudnn8-devel

ARG REPO_DIR="."
ARG PROJECT_USER="aisg"
ARG HOME_DIR="/home/$PROJECT_USER"

COPY $REPO_DIR nlp-hub-gcp
WORKDIR $REPO_DIR/nlp-hub-gcp

RUN pip install -r polyaxon/coherence_momentum/requirements.txt
RUN groupadd -g 2222 $PROJECT_USER && useradd -u 2222 -g 2222 -m $PROJECT_USER
RUN chown -R 2222:2222 $HOME_DIR && \
rm /bin/sh && ln -s /bin/bash /bin/sh
USER 2222

62 changes: 62 additions & 0 deletions polyaxon/coherence_momentum/polyaxon-experiment-nomig.yml
@@ -0,0 +1,62 @@
version: 1.1
kind: component
name: train-model
description: Job for training a predictive model using GPU.
tags: [model_training]
inputs:
  - name: SA_CRED_PATH
    description: Path to credential file for GCP service account.
    isOptional: true
    type: str
    value: /var/secret/cloud.google.com/gcp-service-account.json
    toEnv: GOOGLE_APPLICATION_CREDENTIALS
  - name: WORKING_DIR
    description: The working directory for the job to run in.
    isOptional: true
    value: /home/aisg/nlp-hub-gcp
    type: str
  - name: TRAIN_CONFIG_FILE_PATH
    description: Config file path.
    type: str
    isOptional: false
  - name: MODEL_CONFIG_FILE_PATH
    description: Config file path.
    type: str
    isOptional: false
run:
  kind: job
  connections: [fstore-pvc]
  environment:
    imagePullSecrets: ["gcp-imagepullsecrets"]
    tolerations:
      - effect: NoSchedule
        key: nvidia.com/gpu
        operator: Equal
        value: present
      - effect: NoSchedule
        key: nomig
        operator: Equal
        value: present
  volumes:
    - name: gcp-service-account
      secret:
        secretName: "gcp-sa-credentials"
  container:
    image: asia.gcr.io/nlp-hub/coherence-paradigm-refactored:0.6
    imagePullPolicy: IfNotPresent
    workingDir: "{{ WORKING_DIR }}"
    command: ["/bin/bash","-c"]
    args: [
      "python -m sgnlp.models.coherence_momentum.train \
      --train_config_file {{ TRAIN_CONFIG_FILE_PATH }} \
      --model_config_file {{ MODEL_CONFIG_FILE_PATH }}
      "
    ]
    resources:
      requests:
        nvidia.com/gpu: 1
      limits:
        nvidia.com/gpu: 1
    volumeMounts:
      - name: gcp-service-account
        mountPath: /var/secret/cloud.google.com
21 changes: 21 additions & 0 deletions polyaxon/coherence_momentum/requirements.txt
@@ -0,0 +1,21 @@
pandas==1.1.5
mlflow==1.22.0
protobuf==3.20.*
pylint==2.6.0
pytest-cov==2.10.1
pyyaml==5.4.1
python-json-logger==2.0.2
polyaxon==1.11.3
google-cloud-storage==1.43.0
hydra-core==1.1.1
hydra-optuna-sweeper==1.1.1
optuna==2.10.0
fastapi==0.70.1
uvicorn[standard]==0.14.0
gunicorn==20.1.0
nltk
scikit-learn
torchtext
transformers
sentencepiece
-e .
3 changes: 3 additions & 0 deletions sgnlp/models/coherence_momentum/__init__.py
@@ -0,0 +1,3 @@
from .modeling import CoherenceMomentumModel
from .config import CoherenceMomentumConfig
from .preprocess import CoherenceMomentumPreprocessor
26 changes: 26 additions & 0 deletions sgnlp/models/coherence_momentum/config.py
@@ -0,0 +1,26 @@
from transformers import PretrainedConfig


class CoherenceMomentumConfig(PretrainedConfig):
    def __init__(
        self,
        model_size: str = "base",
        margin: float = 0.1,
        num_negs: int = 5,
        max_len: int = 600,
        num_rank_negs: int = 50,
        momentum_coefficient: float = 0.9999999,
        queue_size: int = 1000,
        contrastive_loss_weight: float = 0.85,
        **kwargs
    ):
        super().__init__(**kwargs)

        self.model_size = model_size
        self.margin = margin
        self.num_negs = num_negs
        self.max_len = max_len
        self.num_rank_negs = num_rank_negs
        self.momentum_coefficient = momentum_coefficient
        self.queue_size = queue_size
        self.contrastive_loss_weight = contrastive_loss_weight
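
As a minimal sketch (not part of this diff), the config can also be constructed directly; its default values mirror model_config.json below:

```
# Sketch: building the config in code instead of loading it with from_pretrained().
from sgnlp.models.coherence_momentum import CoherenceMomentumConfig

config = CoherenceMomentumConfig()  # defaults: model_size="base", max_len=600, num_negs=5, ...
print(config.model_size, config.max_len, config.momentum_coefficient)
```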
10 changes: 10 additions & 0 deletions sgnlp/models/coherence_momentum/model_config.json
@@ -0,0 +1,10 @@
{
"contrastive_loss_weight": 0.85,
"margin": 0.1,
"max_len": 600,
"model_size": "base",
"momentum_coefficient": 0.9999999,
"num_negs": 5,
"num_rank_negs": 50,
"queue_size": 1000
}