Commit 5f575824 authored by Gaëtan Caillaut

Packaging

parent 5ea7e59c
- from train_semeval import *
+ from examples.train_semeval import *
import argparse
import torch
from collections import Counter
......
- from train_semeval import *
+ from examples.train_semeval import *
import argparse
import sys
......
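The two hunks above switch imports from the top-level module to the package path, since the training scripts now live under an examples package. A minimal sketch of usage after this change; the python -m invocation is an assumption, not shown in the diff:

# Assumed usage after packaging: helpers are imported via the package path
# rather than as top-level modules.
from examples.train_semeval import *  # new import path

# Running an example from the repository root would then look like
# (hypothetical invocation):
#     python -m examples.train_semeval --help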
import setuptools

with open("README.md", "rt", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="minibert-pkg-gcaillaut",
    version="0.1.0",
    author="Gaëtan Caillaut",
    author_email="gaetan.caillaut@univ-lemans.fr",
    description="A simplified implementation of BERT",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://git-lium.univ-lemans.fr/gcaillaut/minibert",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.8",
)
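With setup.py in place, the library installs with pip and imports as minibert. A minimal usage sketch, assuming the Attention.from_weights(k, q, v) constructor exercised by the tests below, where each weight matrix maps the input dimension to the projection dimension; the values here are illustrative only:

# After `pip install .` (or `pip install -e .` during development),
# the package is importable as `minibert`.
import torch
from minibert import Attention

# Identity projections of a 2-dimensional input (illustrative values only).
k = torch.eye(2)
q = torch.eye(2)
v = torch.eye(2)
attention = Attention.from_weights(k, q, v)

x = torch.rand(3, 2)  # one sequence of 3 tokens, each of dimension 2
out = attention(x)    # one attended vector per token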
import unittest
import torch
from torch.nn import functional as F
from math import sqrt

from minibert import Attention


class TestAttention(unittest.TestCase):
    def test_attention_given_matrix(self):
        # Attention layer built from fixed key/query/value weight matrices.
        k = torch.tensor([[0, 0.5], [1, 0], [0.5, 0.5]], dtype=torch.float)
        q = torch.tensor([[0, 0.5], [0, 0], [0.5, 0.5]], dtype=torch.float)
        v = torch.tensor([[0.5, 0.5], [1, 0.5], [1, 1]], dtype=torch.float)
        attention = Attention.from_weights(k, q, v)

        x = torch.tensor(
            [[1, 0, 1], [1, 1, 1], [0, 0, 1]], dtype=torch.float)

        # Hand-computed projections of x by k, q and v.
        xk = torch.tensor([[0.5, 1], [1.5, 1], [0.5, 0.5]], dtype=torch.float)
        xq = torch.tensor([[0.5, 1], [0.5, 1], [0.5, 0.5]], dtype=torch.float)
        xv = torch.tensor([[1.5, 1.5], [2.5, 2], [1, 1]], dtype=torch.float)

        # Scaled dot-product attention: softmax(QK^T / sqrt(d)) V, with d = 2.
        x_qk = torch.matmul(xq, xk.t()) / sqrt(2)
        expected = torch.matmul(F.softmax(x_qk, dim=1), xv)

        actual = attention(x)
        self.assertTrue(torch.equal(expected, actual))

    def test_attention_given_batch(self):
        k = torch.tensor([[0, 0.5], [1, 0], [0.5, 0.5]], dtype=torch.float)
        q = torch.tensor([[0, 0.5], [0, 0], [0.5, 0.5]], dtype=torch.float)
        v = torch.tensor([[0.5, 0.5], [1, 0.5], [1, 1]], dtype=torch.float)
        attention = Attention.from_weights(k, q, v)

        x = torch.tensor(
            [[1, 0, 1], [1, 1, 1], [0, 0, 1]], dtype=torch.float)
        batch = torch.stack([x, x, x])

        xk = torch.tensor([[0.5, 1], [1.5, 1], [0.5, 0.5]], dtype=torch.float)
        xq = torch.tensor([[0.5, 1], [0.5, 1], [0.5, 0.5]], dtype=torch.float)
        xv = torch.tensor([[1.5, 1.5], [2.5, 2], [1, 1]], dtype=torch.float)

        x_qk = torch.matmul(xq, xk.t()) / sqrt(2)
        expected = torch.matmul(F.softmax(x_qk, dim=1), xv)
        # Every sequence in the batch is identical, so the outputs are too.
        expected = torch.stack([expected, expected, expected])

        actual = attention(batch)
        self.assertTrue(torch.equal(expected, actual))


if __name__ == "__main__":
    unittest.main()
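For reference, the expected values above follow standard scaled dot-product attention, softmax(QK^T / sqrt(d)) V. Below is a self-contained sketch of the formula the tests encode, not necessarily the library's internal implementation; using transpose(-2, -1) and dim=-1 lets the same code cover both the single-matrix and batched cases:

import torch
from torch.nn import functional as F
from math import sqrt

def scaled_dot_product_attention(x, k, q, v):
    # Project the input into key, query and value spaces.
    xk = x @ k
    xq = x @ q
    xv = x @ v
    d = k.shape[-1]  # projection dimension (2 in the tests above)
    # Attention weights over tokens, scaled by sqrt(d) for numerical stability.
    scores = xq @ xk.transpose(-2, -1) / sqrt(d)
    return F.softmax(scores, dim=-1) @ xv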
import unittest
import torch

from minibert import MiniBert


class TestMiniBert(unittest.TestCase):
    def test_minibert_not_fail(self):
        # Smoke test: a forward pass on a small batch should not raise.
        minibert = MiniBert(10, 10, 10)
        x = torch.tensor([
            [0, 1, 3, 4],
            [0, 1, 3, 4],
            [0, 1, 3, 4]
        ])
        out = minibert(x)


if __name__ == "__main__":
    unittest.main()
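A quick way to run both new test modules at once, assuming they sit in a tests/ directory (the layout is not shown in this diff, so the path is a guess):

# Hypothetical test runner; adjust the start directory to the actual layout.
import unittest

if __name__ == "__main__":
    suite = unittest.defaultTestLoader.discover("tests")
    unittest.TextTestRunner(verbosity=2).run(suite)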