Skip to content

hbehrad/GANS-mnist

Folders and files

NameName
Last commit message
Last commit date

Latest commit

 

History

6 Commits
 
 
 
 
 
 

Repository files navigation

GANS-mnist (generating the digit three from the MNIST dataset)

Because of the high processing requirements, I suggest using Kaggle at https://www.kaggle.com

After registering and opening a new notebook, follow these steps:

First epoch

1

80th epoch

2

In this project I train a GAN to generate the digit 3 from the MNIST dataset using Python.

First we must install the requirements, such as TensorFlow, NumPy, etc.

import numpy as np
import pip
import tensorflow as tf
from keras.datasets import mnist
# Upload mnist.npz to Kaggle and use its path here so load_data reads the
# local copy instead of downloading.
(X_train,Y_train), (X_test,Y_test)=mnist.load_data(path='/kaggle/input/mnistnpz/mnist.npz')
import matplotlib.pyplot as plt
from keras.layers import Dense,Input,Flatten,Conv2D,MaxPool2D,LeakyReLU,Reshape,Dropout
from keras.models import Model,Sequential
from keras.optimizers import Adam
import shutil
# Keep only the samples labelled 3 — the GAN is trained to generate threes.
train_filter = np.where((Y_train == 3 ))
test_filter = np.where((Y_test == 3))
X_train, Y_train = X_train[train_filter], Y_train[train_filter]
X_test, Y_test = X_test[test_filter], Y_test[test_filter]

# Pool train and test images (labels are no longer needed) and scale the
# uint8 pixel values from [0, 255] to [-1, 1] to match the generator's
# tanh output range.
X=np.vstack((X_train, X_test))
X=X.astype('float32')
X=(X-127.5)/127.5

def discriminator_dense():
    """Build and compile the dense discriminator.

    Takes a 28x28x1 image and returns the sigmoid probability that the
    image is a real MNIST digit rather than a generated one. Compiled
    with binary cross-entropy and the standard GAN Adam settings
    (lr=0.0002, beta_1=0.5).
    """
    image_in = Input(shape=(28, 28, 1))

    features = Flatten()(image_in)
    features = Dropout(0.4)(features)
    features = Dense(1024, activation=LeakyReLU(alpha=0.2))(features)
    features = Dropout(0.4)(features)
    features = Dense(512, activation=LeakyReLU(alpha=0.2))(features)
    validity = Dense(1, activation="sigmoid")(features)

    critic = Model(image_in, validity)
    critic.compile(
        optimizer=Adam(learning_rate=0.0002, beta_1=0.5),
        loss="binary_crossentropy",
    )
    return critic
    
def generator(n):
    """Build the (uncompiled) generator model.

    Maps an n-dimensional latent noise vector to a 28x28x1 image. The
    tanh output matches the [-1, 1] range the training data is scaled to.

    Parameters
    ----------
    n : int
        Dimensionality of the latent input vector.
    """
    # FIX: shape must be a 1-element tuple. The original "(n)" is just the
    # int n (parentheses alone don't make a tuple), which is not a valid
    # shape argument on all Keras versions.
    inp = Input(shape=(n,))
    x = Dense(256, activation=LeakyReLU(alpha=0.2))(inp)
    x = Dense(512, activation=LeakyReLU(alpha=0.2))(x)
    x = Dense(1024, activation=LeakyReLU(alpha=0.2))(x)
    # 784 = 28*28 pixels, reshaped into the image grid below.
    x = Dense(784, activation='tanh')(x)
    op = Reshape((28, 28, 1))(x)
    return Model(inp, op)
    
    
def gan(discrim, gen):
    """Chain generator and discriminator into the combined adversarial model.

    The discriminator is frozen (trainable=False) *before* this combined
    model is compiled, so training the combined model only updates the
    generator's weights. The discriminator was already compiled earlier,
    so it still trains normally when called directly.
    """
    discrim.trainable = False
    combined = Sequential([gen, discrim])
    combined.compile(
        optimizer=Adam(learning_rate=0.0002, beta_1=0.5),
        loss="binary_crossentropy",
    )
    return combined
    
# Instantiate the three models: discriminator, generator (100-dim latent
# space), and the combined GAN used to train the generator.
discrim=discriminator_dense()

gener=generator(100)

gan_model=gan(discrim,gener)

# Sanity check: show one untrained-generator sample with the
# discriminator's score for it as the x-axis label.
plt.imshow(gener.predict(np.random.randn(100).reshape(1,100)).reshape(28,28), cmap="gray")
plt.xlabel(discrim.predict(gener.predict(np.random.randn(100).reshape(1,100))))

# Sanity check: show a real "3" and the (untrained) discriminator's score
# for a different real sample.
plt.imshow(X[0].reshape(28,28), cmap="gray")
plt.xlabel(discrim.predict(X[15].reshape(1,28,28,1)))

# Training hyperparameters.
epochs=80
batch_size=2048
half_batch=batch_size//2   # real/fake halves of each discriminator batch
n=100                      # latent vector dimensionality
losses=[]                  # [discriminator_loss, generator_loss] per step

from tqdm import tqdm

# Adversarial training loop. Each step trains the discriminator on a
# half-batch of real plus a half-batch of fake digits, then trains the
# generator (through the frozen discriminator) to fool it.
# Assumes X, gener, discrim, gan_model, epochs, batch_size, half_batch,
# n and losses are defined above.
for epoch in range(epochs):
    print("EPOCH ", epoch)
    for _ in tqdm(range(len(X) // batch_size)):
        # Half-batch of real digits labelled 1.
        xreal = X[np.random.randint(0, len(X), half_batch)].reshape(half_batch, 28, 28, 1)
        yreal = np.ones((half_batch, 1))
        # Half-batch of generated digits labelled 0.
        xfake = gener.predict(np.random.randn(half_batch, n), verbose=0)
        yfake = np.zeros((half_batch, 1))
        # Train the discriminator on the combined real+fake batch.
        dloss = discrim.train_on_batch(np.vstack((xreal, xfake)),
                                       np.vstack((yreal, yfake)))
        # Train the generator: ask the frozen-discriminator GAN to label
        # fresh noise as "real" (1), updating only the generator weights.
        gloss = gan_model.train_on_batch(np.random.randn(batch_size, n),
                                         np.ones((batch_size, 1)))
        losses.append([dloss, gloss])

    print("losses->", dloss, " ", gloss)
    # Show a 5x5 grid of generated digits. FIX: generate all 25 samples
    # in ONE batched predict call instead of 25 separate forward passes
    # (same images, far less per-epoch overhead).
    samples = gener.predict(np.random.randn(25, n), verbose=0).reshape(25, 28, 28)
    fig, axes = plt.subplots(5, 5, figsize=(12, 12))
    for ii in range(5):
        for jj in range(5):
            axes[ii, jj].imshow(samples[5 * ii + jj], cmap='gray')
    plt.show()
    plt.close()

I strongly suggest downloading the file and using it instead of copying the code above — some spacing and indentation may differ from the main file.

About

No description, website, or topics provided.

Resources

Stars

Watchers

Forks

Releases

No releases published

Packages

No packages published