
Commit: to python3
Zi Wang committed Jul 18, 2019
1 parent 4e91700 commit 4e6f9ed
Showing 9 changed files with 90 additions and 103 deletions.
15 changes: 9 additions & 6 deletions ebo_core/bo.py
@@ -1,21 +1,24 @@
import numpy as np
-from gibbs import GibbsSampler
+from ebo_core.gibbs import GibbsSampler
from scipy.optimize import minimize


class bo(object):
def __init__(self, f, X, y, x_range, eval_only, extra, options):
self.f = f
-self.x_range = x_range
self.options = options
-self.well_defined = X.shape[0] > 0
-self.solver = GibbsSampler(X, y, options)
self.eval_only = eval_only
-self.opt_n = options['opt_n']
-self.dx = options['dx']

if eval_only:
self.newX = extra
else:
+self.x_range = x_range
+
+self.well_defined = X.shape[0] > 0
+self.solver = GibbsSampler(X, y, options)
+
+self.opt_n = options['opt_n']
+self.dx = options['dx']
self.n_bo = extra
self.opt_n = np.maximum(self.opt_n, self.n_bo * 2)

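Note on the import edits in this and the following files: Python 3 drops implicit relative imports, so modules inside the ebo_core package must be imported by their package-qualified path. A minimal sketch of the difference (not part of the commit):

    # Python 2 allowed an implicit relative import between modules of the same package:
    #     from gibbs import GibbsSampler        # ImportError on Python 3
    # Python 3 needs the absolute, package-qualified form used in this commit:
    from ebo_core.gibbs import GibbsSampler
    # or, equivalently, an explicit relative import when bo.py is loaded as part of the package:
    #     from .gibbs import GibbsSampler
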
36 changes: 17 additions & 19 deletions ebo_core/ebo.py
@@ -1,10 +1,11 @@
import os
import time

-import helper
+import ebo_core.helper as helper
import numpy as np
-from mondrian import MondrianTree
-from mypool import MyPool
+from ebo_core.mondrian import MondrianTree
+from ebo_core.mypool import MyPool
+from builtins import range

try:
import cPickle as pickle
@@ -50,8 +51,8 @@ def get_params(self):

def run(self):
x_range, T, B, dim_limit, min_leaf_size, max_n_leaves, n_bo, n_top = self.get_params()
-tstart = self.X.shape[0] / B
-for t in xrange(tstart, T):
+tstart = self.X.shape[0] // B
+for t in range(tstart, T):
# search space partition
ref = self.y.min() if self.y.shape[0] > 0 else None
self.tree = MondrianTree(self.X, self.y, x_range, max_n_leaves, reference=ref)
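The two edits just above track two Python 3 behaviour changes: dividing integers with / now yields a float, and xrange no longer exists (range is already lazy). A small illustration, outside the commit:

    import numpy as np

    X = np.zeros((10, 3))    # stands in for self.X
    B = 4
    print(X.shape[0] / B)    # true division on Python 3 -> 2.5, which range() rejects
    print(X.shape[0] // B)   # floor division -> 2, the old Python 2 integer behaviour
    for t in range(X.shape[0] // B, 7):   # range() replaces xrange()
        pass
    print(7.5 / 2.0, 7.5 // 2.0)   # 3.75 vs 3.0: on floats, // floors and changes the value
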
@@ -61,7 +62,7 @@ def run(self):
# this might be dangerous if high dimension and R>1
tot_volumn = np.array([n.volumn for n in leaves]).sum()
parameters = [[0, n.X, n.y, n.x_range, False,
-np.maximum(n_bo, np.ceil((tot_eval * n.volumn / tot_volumn)).astype(int)), self.options] for
+np.maximum(n_bo, np.ceil((tot_eval * n.volumn // tot_volumn)).astype(int)), self.options] for
n in leaves]

# run bo learning in parallel
@@ -84,7 +85,7 @@ def run(self):
newX = self.choose_newX(newX, newacf, n_top, B)

# map again to evaluate the selected inputs
-parameters = [[self.f, n.X, n.y, n.x_range, True, [x], self.options] for x in newX]
+parameters = [[self.f, None, None, None, True, [x], self.options] for x in newX]

newY = self.pool.map(parameters, 'eval' + str(t), not self.options['func_cheap'])
# update X, y
@@ -99,8 +100,7 @@ def run(self):
self.pause()

def choose_newX(self, newX, newacf, n_top, B):
-print
-'start choosing newX'
+print('start choosing newX')
start = time.time()
inds = newacf.argsort()
if 'heuristic' in self.options and self.options['heuristic']:
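The print edits in this file convert Python 2 print statements into calls to the Python 3 print() function. In the old two-line form shown above, a bare print followed by a string literal on the next line printed only a blank line in a script (the literal was a discarded expression), so the rewritten calls also restore the intended log message. A short sketch, not taken from the commit:

    import time

    start = time.time()
    print('start choosing newX')
    # print() joins multiple arguments with spaces, so text and values need no manual formatting:
    print('finished choosing newX, elapsed time = ', time.time() - start)
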
@@ -143,8 +143,7 @@ def choose_newX(self, newX, newacf, n_top, B):
all_candidates = all_candidates[all_candidates != jbest]
next_ind += 1
rec.append(marginal)
-print
-'finished choosing newX, eplased time = ', time.time() - start
+print('finished choosing newX, eplased time = ', time.time() - start)

return newX[good_inds]

@@ -163,18 +162,18 @@ def print_step(self, newX, t):
if self.options['isplot']:
plot_ebo(self.tree, newX, t)
_, besty, cur = self.get_best()
-print
-'t=', t, ', bestid=', cur, ', besty=', besty
-print
-'final z=', self.z, ' final k=', self.k
+print('t=', t, ', bestid=', cur, ', besty=', besty)

+print('final z=', self.z, ' final k=', self.k)


def reload(self):
fnm = self.options['save_file_name']
if not os.path.isfile(fnm):
return False
self.X, self.y, self.z, self.k, self.timing = pickle.load(open(fnm))
-print
-'Successfully reloaded file.'
+print('Successfully reloaded file.')


# This will save the pool workers
def pause(self):
@@ -191,8 +190,7 @@ def save(self):
if not os.path.exists(dirnm):
os.makedirs(dirnm)
pickle.dump([self.X, self.y, self.z, self.k, self.timing], open(fnm, 'wb'))
-print
-'saving file... ', time.time() - start, ' seconds'
+print('saving file... ', time.time() - start, ' seconds')


def check_valid_options(options):
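ebo.py keeps the usual cPickle/pickle compatibility import (truncated above); presumably it falls back to the standard pickle module on Python 3, where the C implementation is picked automatically. A sketch of that pattern, together with the binary-mode open() that pickle requires on Python 3 (the file name is a placeholder, not from the repository):

    try:
        import cPickle as pickle   # Python 2: C-accelerated pickler
    except ImportError:
        import pickle              # Python 3: one module; the C implementation is used when available

    state = {'X': [], 'y': []}
    with open('ebo_state.pk', 'wb') as f:
        pickle.dump(state, f)
    with open('ebo_state.pk', 'rb') as f:   # 'rb' is required for pickled files on Python 3
        state = pickle.load(f)
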
11 changes: 6 additions & 5 deletions ebo_core/gibbs.py
@@ -1,9 +1,10 @@
-import helper
+import ebo_core.helper as helper
import numpy as np
import sklearn.random_projection as rp
from gp_tools.gp import SparseFeatureGP, DenseFeatureGP, DenseKernelGP, SparseKernelGP
from gp_tools.representation import TileCoding, IndexToBinarySparse, DenseKernel, DenseL1Kernel, SparseKernel, SparseRPTilecoding
-from scipy.misc import comb
+from scipy.special import comb
+from builtins import range


# remember to set random seed somewhere
@@ -71,7 +72,7 @@ def get_tilegp(self):
all_cat = np.unique(self.z)
if self.tilecap:
hashing_mem = self.tilecap / len(all_cat) / nlayers
-hashing = [rp.UNH(hashing_mem) for _ in xrange(len(all_cat))]
+hashing = [rp.UNH(hashing_mem) for _ in range(len(all_cat))]
for a in all_cat:
inds = helper.find(self.z == a)
indices.append(inds)
@@ -109,7 +110,7 @@ def get_tilegp(self):

# idea: can instead get log likelihood on different subset of data for gibbs
def run(self, niter):
-for i in xrange(niter):
+for i in range(niter):
# sample z w/ limit on size
# random permute dimensions
for d in np.random.permutation(range(self.xdim)):
@@ -130,7 +131,7 @@ def run(self, niter):
other_cat = other_cat[np.logical_and(other_cat != zd_old, other_cat != -1)]
# otherwise, need to remove z[d] and add one additional category
if a_size > 0 and other_cat.size + 1 < self.n_add:
-for a in xrange(self.n_add):
+for a in range(self.n_add):
if (a not in other_cat) and (a != zd_old):
other_cat = np.append(other_cat, [a])
break
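gibbs.py swaps scipy.misc.comb for scipy.special.comb (the scipy.misc version was deprecated and later removed) and imports range from builtins, the standard module on Python 3 that the future package also backports to Python 2. A quick check, outside the commit:

    from builtins import range        # Python 3 stdlib; provided by the future package on Python 2
    from scipy.special import comb    # scipy.misc.comb is gone in recent SciPy releases

    print(comb(5, 2, exact=True))     # 10
    print(list(range(3)))             # [0, 1, 2]; range is lazy on both interpreters
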
2 changes: 1 addition & 1 deletion ebo_core/mondrian.py
@@ -1,4 +1,4 @@
-import helper
+import ebo_core.helper as helper
import numpy as np


2 changes: 1 addition & 1 deletion ebo_core/mypool.py
@@ -1,4 +1,4 @@
-from bo import bo
+from ebo_core.bo import bo


class MyPool(object):
3 changes: 2 additions & 1 deletion gp_tools/gp.py
@@ -3,7 +3,8 @@
import scipy.sparse


-# import sksparse.cholmod as spch
+import sksparse.cholmod as spch
+
class SparseFeatureGP:
def __init__(self, X, y, sigma, phi):
self.X = X
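gp.py now imports sksparse.cholmod unconditionally, which makes scikit-sparse (and the underlying SuiteSparse/CHOLMOD library) a hard dependency. The snippet below is assumed usage of its sparse Cholesky solver, not code from this repository:

    import numpy as np
    import scipy.sparse
    from sksparse.cholmod import cholesky   # needs scikit-sparse with libsuitesparse installed

    A = scipy.sparse.eye(4, format='csc') * 2.0   # toy symmetric positive definite matrix
    factor = cholesky(A)                          # sparse Cholesky factorization of A
    x = factor(np.ones(4))                        # solves A x = b
    print(x)                                      # [0.5 0.5 0.5 0.5]
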
14 changes: 11 additions & 3 deletions gp_tools/representation.py
@@ -1,4 +1,12 @@
-from itertools import chain, izip
+from __future__ import print_function
+from itertools import chain
+
+try:
+    import itertools.zip as zip
+except ImportError:
+    pass
+
+from builtins import range

import numpy as np
from scipy.sparse import csr_matrix
@@ -463,7 +471,7 @@ def __init__(self,
self.offset = offset
if offset is None:
self.offset = np.empty((ntiles.shape[1], ntilings))
-for i in xrange(ntiles.shape[0]):
+for i in range(ntiles.shape[0]):
self.offset[i, :] = -rnd_stream.random_sample(ntilings) / ntiles[0, i]

if self.hashing == None:
@@ -759,7 +767,7 @@ def grid_of_points(state_range, num_centers):
if isinstance(num_centers, int):
num_centers = [num_centers] * state_range[0].shape[0]
points = [np.linspace(start, stop, num, endpoint=True)
-for start, stop, num in izip(state_range[0],
+for start, stop, num in zip(state_range[0],
state_range[1],
num_centers)]

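representation.py drops itertools.izip, which does not exist on Python 3 (the built-in zip is already lazy there), and adds from __future__ import print_function so print() behaves the same on both interpreters. The guard added in this commit imports itertools.zip, which raises ImportError on either interpreter and therefore always falls through to the built-in zip; a more conventional spelling of the same guard, shown here as a sketch, would be:

    try:
        from itertools import izip as zip   # Python 2: the lazy zip lived in itertools
    except ImportError:
        pass                                # Python 3: the built-in zip is already lazy

    import numpy as np

    state_low = np.array([0.0, -1.0])       # illustrative stand-in for state_range[0]
    state_high = np.array([1.0, 1.0])       # illustrative stand-in for state_range[1]
    num_centers = [3, 5]
    points = [np.linspace(start, stop, num, endpoint=True)
              for start, stop, num in zip(state_low, state_high, num_centers)]
    print([p.shape for p in points])        # [(3,), (5,)]
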