Because of the high processing requirements, I suggest running this on Kaggle (https://www.kaggle.com).
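If you do use Kaggle, make sure a GPU accelerator is enabled for the notebook; a minimal, optional check (assuming TensorFlow 2.x) is:

import tensorflow as tf

# an empty list means no GPU is visible and training will fall back to the CPU
print(tf.config.list_physical_devices('GPU'))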
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.layers import Dense, Input, Flatten, Conv2D, MaxPool2D, LeakyReLU, Reshape, Dropout
from keras.models import Model, Sequential
from keras.optimizers import Adam

# upload mnist.npz to Kaggle as a dataset and pass its path to load_data
(X_train, Y_train), (X_test, Y_test) = mnist.load_data(path='/kaggle/input/mnistnpz/mnist.npz')
#filter dataset to use only data with label 3
train_filter = np.where((Y_train == 3 ))
test_filter = np.where((Y_test == 3))
X_train, Y_train = X_train[train_filter], Y_train[train_filter]
X_test, Y_test = X_test[test_filter], Y_test[test_filter]
# combine the train and test threes, convert to float, and scale to [-1, 1] to match the generator's tanh output
X = np.vstack((X_train, X_test))
X = X.astype('float32')
X = (X - 127.5) / 127.5
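At this point X should hold only the digit-3 images, rescaled to [-1, 1]; a quick optional sanity check:

print(X.shape)           # roughly (7000, 28, 28): all the threes from train and test combined
print(X.min(), X.max())  # expected: -1.0 1.0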
# dense discriminator: flattens the 28x28 image and outputs a real/fake probability
def discriminator_dense():
    inp = Input(shape=(28, 28, 1))
    x = Flatten()(inp)
    x = Dropout(0.4)(x)
    x = Dense(1024, activation=LeakyReLU(alpha=0.2))(x)
    x = Dropout(0.4)(x)
    x = Dense(512, activation=LeakyReLU(alpha=0.2))(x)
    op = Dense(1, activation="sigmoid")(x)
    model = Model(inp, op)
    model.compile(optimizer=Adam(learning_rate=0.0002, beta_1=0.5), loss="binary_crossentropy")
    return model
# generator: maps an n-dimensional noise vector to a 28x28x1 image in [-1, 1]
def generator(n):
    inp = Input(shape=(n,))
    x = Dense(256, activation=LeakyReLU(alpha=0.2))(inp)
    x = Dense(512, activation=LeakyReLU(alpha=0.2))(x)
    x = Dense(1024, activation=LeakyReLU(alpha=0.2))(x)
    x = Dense(784, activation='tanh')(x)
    op = Reshape((28, 28, 1))(x)
    return Model(inp, op)
# combined model: the discriminator is frozen here so that only the generator
# is updated when training on the GAN loss
def gan(discrim, gen):
    discrim.trainable = False
    model = Sequential()
    model.add(gen)
    model.add(discrim)
    model.compile(optimizer=Adam(learning_rate=0.0002, beta_1=0.5), loss="binary_crossentropy")
    return model
discrim = discriminator_dense()
gener = generator(100)
gan_model = gan(discrim, gener)
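Before training it can be worth inspecting the three models; summary() prints the layer shapes and parameter counts (the discriminator's weights appear as non-trainable inside gan_model, since it was frozen before the combined model was compiled):

gener.summary()
discrim.summary()
gan_model.summary()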
# score an untrained generator sample and a real digit with the fresh discriminator
fake = gener.predict(np.random.randn(1, 100))
plt.imshow(fake.reshape(28, 28), cmap="gray")
plt.xlabel(discrim.predict(fake))
plt.show()
plt.imshow(X[0].reshape(28, 28), cmap="gray")
plt.xlabel(discrim.predict(X[0].reshape(1, 28, 28, 1)))
plt.show()
# training hyperparameters
epochs = 80
batch_size = 2048
half_batch = batch_size // 2   # half real, half generated images per discriminator batch
n = 100                        # size of the noise (latent) vector
losses = []
from tqdm import tqdm
for i in range(epochs):
    print("EPOCH ", i)
    for j in tqdm(range(len(X)//batch_size)):
        # discriminator batch: half real images labelled 1, half generated images labelled 0
        xreal, yreal = X[np.random.randint(0, len(X), half_batch)].reshape(half_batch, 28, 28, 1), np.ones(half_batch).reshape(half_batch, 1)
        xfake, yfake = gener.predict(np.random.randn(half_batch, n), verbose=0), np.zeros(half_batch).reshape(half_batch, 1)
        xfinal, yfinal = np.vstack((xreal, xfake)), np.vstack((yreal, yfake))
        dloss = discrim.train_on_batch(xfinal, yfinal)
        # generator update: train the stacked model on noise with "real" labels while the discriminator stays frozen
        gloss = gan_model.train_on_batch(np.random.randn(batch_size, n), np.ones(batch_size).reshape(batch_size, 1))
        losses.append([dloss, gloss])
    # preview a 5x5 grid of generated digits at the end of each epoch
    fig, axes = plt.subplots(5, 5, figsize=(12, 12))
    print("losses->", dloss, " ", gloss)
    for ii in range(5):
        for jj in range(5):
            axes[ii, jj].imshow(gener.predict(np.random.randn(n).reshape(1, n), verbose=0).reshape(28, 28), cmap='gray')
    plt.show()
    plt.close()
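Once training finishes, the recorded losses can be plotted to check that the generator and discriminator stayed roughly balanced, and the generator can be saved for later sampling; a minimal sketch using the losses list and gener from above (the filename is just an example):

losses_arr = np.array(losses)
plt.plot(losses_arr[:, 0], label="discriminator loss")
plt.plot(losses_arr[:, 1], label="generator loss")
plt.xlabel("training step")
plt.legend()
plt.show()

gener.save("generator_mnist3.h5")  # example filename; Keras infers the HDF5 format from the .h5 extension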