Add: infogan python interface

Liyan Zheng 2023-04-14 16:36:55 +08:00
parent f14edcd52f
commit 307614d95d
5 changed files with 91 additions and 13 deletions

View File

@@ -46,10 +46,13 @@ class RandomGenerator : public DataGenerator {
    std::mt19937 e;
    std::uniform_int_distribution<int> di;
    std::uniform_real_distribution<float> dr;
    bool generateInteger;

  public:
    RandomGenerator(double l = 0, double r = 1, unsigned int seed = 0)
        : l(l), r(r), e(seed), di(l, r), dr(l, r) {}
    RandomGenerator(double l = 0, double r = 1, unsigned int seed = 0,
                    bool generateInteger = false)
        : l(l), r(r), e(seed), di(l, r), dr(l, r),
          generateInteger(generateInteger) {}
    virtual ~RandomGenerator() {}

  private:
@@ -60,7 +63,7 @@ class RandomGenerator : public DataGenerator {
    }
    void fill(float *data, size_t size) override {
        for (size_t i = 0; i < size; i++) {
            data[i] = dr(e);
            data[i] = (generateInteger) ? di(e) : dr(e);
        }
    }
};
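For context, a minimal sketch (not part of this commit) of how the new generateInteger flag would be used, relying only on calls that appear elsewhere in this diff; the include paths are assumptions about the repository layout:

// Sketch only: the flag switches fill() from the real distribution dr to the
// integer distribution di, so tensor values are exactly representable and
// easier to compare across backends.
#include "core/graph.h"          // assumed header locations
#include "core/runtime.h"
#include "utils/data_generator.h"
using namespace infini;

int main() {
    Runtime cpu = NativeCpuRuntimeObj::getInstance();
    Graph g = make_ref<GraphObj>(cpu);
    Tensor t = g->addTensor({1, 1, 1, 8});
    g->dataMalloc();
    auto realGen = RandomGenerator(-0.1, 0.1, /*seed=*/0);                      // uniform floats
    auto intGen = RandomGenerator(-5, 5, /*seed=*/0, /*generateInteger=*/true); // integers
    t->setData(intGen);
    return 0;
}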

View File

@@ -242,7 +242,7 @@ class MemboundTVMPackedFunction : public Kernel {
        int status = -1;
        status =
            write(fdP2C[1], serializedArgs.data(), serializedArgs.size());
        IT_ASSERT(status == serializedArgs.size(),
        IT_ASSERT((size_t)status == serializedArgs.size(),
                  "Failed to write to pipe");
        close(fdP2C[1]);
@@ -255,7 +255,6 @@ class MemboundTVMPackedFunction : public Kernel {
        // Read from pipe
        FILE *stream;
        int c;
        stream = fdopen(fdC2P[0], "r");
        char buf_read[257] = {0};
        status = std::fscanf(stream, "%256c", buf_read);
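The (size_t) cast above resolves the signed/unsigned comparison: write(2) returns ssize_t, while serializedArgs.size() is size_t. A standalone sketch of the same idea (not the repository's code, names are illustrative only):

#include <cassert>
#include <string>
#include <unistd.h>

// write() returns ssize_t (-1 on error); comparing it to a size_t directly
// triggers -Wsign-compare, so check for failure before casting.
void sendAll(int fd, const std::string &payload) {
    ssize_t written = write(fd, payload.data(), payload.size());
    assert(written >= 0 && (size_t)written == payload.size());
}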

View File

@@ -12,7 +12,7 @@
namespace infini {

// NHWC format
Graph getInfoGAN(int batch, Runtime runtime) {
Graph getInfoGAN(int batch, Runtime runtime, int nLayers) {
    Graph g = make_ref<GraphObj>(runtime);
    vector<Tensor> weights;
    vector<tuple<int, int, int, int>> cs{
@@ -21,7 +21,8 @@ Graph getInfoGAN(int batch, Runtime runtime) {
        {64, 4, 1, 2}, {32, 4, 1, 2},
    };
    Tensor input = g->addTensor({batch, 1, 1, 228});
    for (auto [channel, kernelSize, pad, stride] : cs) {
    for (int i = 0; i < (int)cs.size() && i < nLayers; ++i) {
        auto [channel, kernelSize, pad, stride] = cs[i];
        int f = input->getDims()[3]; // n, h, w, f
        auto weight =
            g->addTensor({f, kernelSize, kernelSize, channel}); // f, r, s, c
@@ -42,13 +43,12 @@ void printGraph(Graph g) {
    }
}

vector<Tensor> runInfoGAN() {
    const bool useMutatorDirectly = true;
vector<Tensor> runInfoGAN(int nLayers) {
    Runtime cuda = make_ref<CudaRuntimeObj>();
    Runtime cpu = NativeCpuRuntimeObj::getInstance();
    Graph gCpu = make_ref<GraphObj>(cpu);

    Graph g = getInfoGAN(1, cuda);
    Graph g = getInfoGAN(1, cuda, nLayers);
    auto mutator =
        make_ref<NMutator>(NMutator::Mode::RuleBased,
@@ -70,7 +70,8 @@ vector<Tensor> runInfoGAN() {
        fuidToInputTensor[t->getFuid()] = t;
    }

    auto gen = RandomGenerator(-1, 1, 0);
    auto gen = RandomGenerator(-0.1, 0.1, 0);
    // auto gen = RandomGenerator(-5, 5, 0, true);
    for (auto t : g->getInputs()) {
        t->setData(gen);
    }
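The Python script added below calls backend.runInfoGAN(n_layers), so runInfoGAN above presumably gets registered with the project's Python module. That binding file is not reproduced in this excerpt; a rough sketch of what such a registration could look like, assuming a pybind11-based FFI and that Tensor is already bound (module and function names here are hypothetical):

#include <pybind11/pybind11.h>
#include <pybind11/stl.h> // converts std::vector<Tensor> to a Python list
// plus the header that declares infini::runInfoGAN

namespace py = pybind11;

// Hypothetical registration helper; the real one lives in the project's FFI code.
void export_infogan(py::module &m) {
    // infini::runInfoGAN(int nLayers) -> vector<Tensor>, defined in the file above
    m.def("runInfoGAN", &infini::runInfoGAN, py::arg("nLayers"));
}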

View File

@@ -0,0 +1,74 @@
import backend
import onnx
import torch
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd


def to_pytorch_tensor(tensor) -> torch.Tensor:
    data = tensor.copyout_float()
    tt = torch.tensor(data)
    return tt.reshape(tensor.shape())


def run_InfoGAN(n_layers: int):
    if_tensors = backend.runInfoGAN(n_layers)
    tensors = [to_pytorch_tensor(t) for t in if_tensors]
    return tensors


def read_and_check():
    for n_layers in range(1, 6):
        ans = torch.load(f'torch_{n_layers}layers_0.pt')
        x = torch.load(f'torch_{n_layers}layers_1.pt')
        print(f'=== {n_layers} layers ===')
        print(x.abs().max())


def run():
    data = []
    for n_layers in range(1, 6):
        tensors = run_InfoGAN(n_layers)
        for i, t in enumerate(tensors):
            torch.save(t, f'torch_{n_layers}layers_{i}.pt')
        print(f'============ {n_layers} layers = = =')
        ans, x = tensors
        print(f'Allclose {torch.allclose(ans, x)}')

        # Print error numbers
        tot = np.product(ans.shape)
        data.append([])
        for i in range(0, 10):
            tol = 10**(-i)
            clo = torch.isclose(ans, x, atol=tol, rtol=tol).sum().item()
            print(f'0.1^{i} close: {clo}/{tot} = {clo/tot}')
            data[-1].append(clo/tot)

        rel_err = torch.abs((ans-x)/ans)
        print(rel_err, rel_err.max())
        print(f'ans = {ans}')
        print(f'x = {x}')

        # # Plot CDF
        # fig, axes = plt.subplots(9, 1)
        # print(axes)
        # for i, ax in enumerate(axes):
        #     print(i)
        #     ax: plt.Axes
        #     ax.hist(torch.flatten(rel_err), density=True, cumulative=True, label='CDF',
        #             histtype='step', alpha=0.8, color='k')
        #     ax.set_xlim(0, 10**(-i))
        #     # ax.set_title('')
        # plt.show()
        # plt.savefig('a.pdf')

    df = pd.DataFrame(data)
    print(df.to_string())
    df.set_axis([f'0.1^{i}' for i in range(0, 10)], axis=1, inplace=True)
    print(df.to_string())
    df.to_csv('a.csv')


if __name__ == "__main__":
    run()
    # read_and_check()

View File

@@ -56,7 +56,7 @@ TEST(Mutator, NaiveConvWithInterpreter) {
// FIXME: failed since implicit transpose for DLT
TEST(Mutator, InfoGAN_TConv_3_correctness) {
    const bool useMutatorDirectly = true;
    const bool useMutatorDirectly = false;
    Runtime runtime = make_ref<CudaRuntimeObj>();
    Graph g = make_ref<GraphObj>(runtime);
    Runtime cpu = NativeCpuRuntimeObj::getInstance(); // CPUruntime is singleton
@@ -91,12 +91,13 @@ TEST(Mutator, InfoGAN_TConv_3_correctness) {
        fuidToInputTensor[t->getFuid()] = t;
    }
    std::cout << "# bestGraphs = " << bestGraphs.size() << std::endl;

    for (size_t i = 0; i < bestGraphs.size(); i++) {
        auto bestGraphCpu = bestGraphs[i];
        auto bestGraph =
            make_ref<GraphObj>(runtime, bestGraphCpu->getOperators());

        auto gen = RandomGenerator(0, 1, i);
        auto gen = RandomGenerator(0.1, 0.1, i);
        bestGraph->dataMalloc();
        // Initialize inputs with random data
        for (auto t : g->getInputs()) {