Skip to content

Commit

Permalink
Merge branch 'jgfouca/add_code_checking' (PR #267)
Browse files Browse the repository at this point in the history
Add a new tool that checks that all python library source files are
100% clean for a certain configuration of pylint. Fix all pylint
errors discovered by the tool. Add a test to run this tool every
night.

Test suite: scripts_regression_tests
Test baseline:
Test namelist changes:
Test status: bit for bit

Fixes: [CIME Github issue #]

User interface changes?:

Code review: @jedwards4b

* jgfouca/add_code_checking:
  Fix a couple remaining pylint issues
  Made code_checker parallel, fix test name so it actually runs
  Fix remaining test failures
  Bug fix
  All scripts passing
  Add code checker test. Remove refactor disablings from scripts_regression_tests.py
  Progress. Tool added. build.py 100%

Conflicts:
	utils/python/CIME/case_setup.py
  • Loading branch information
jgfouca committed Jul 19, 2016
2 parents e46d0f4 + de33082 commit 367d47f
Show file tree
Hide file tree
Showing 71 changed files with 584 additions and 510 deletions.
8 changes: 2 additions & 6 deletions driver_cpl/cime_config/buildexe
Original file line number Diff line number Diff line change
Expand Up @@ -42,19 +42,15 @@ def _main_func():
# build model executable

makefile = os.path.join(casetools, "Makefile")
exename = os.path.join(exeroot, model + ".exe")
exename = os.path.join(exeroot, model + ".exe")

cmd = "%s exec_se -j %d EXEC_SE=%s MODEL=%s LIBROOT=%s -f %s "\
% (gmake, gmake_j, exename, "driver", libroot, makefile)

rc, out, err = run_cmd(cmd, ok_to_fail=True)
rc, out, err = run_cmd(cmd)
expect(rc==0,"Command %s failed rc=%d\nout=%s\nerr=%s"%(cmd,rc,out,err))

###############################################################################

if __name__ == "__main__":
_main_func()




2 changes: 1 addition & 1 deletion driver_cpl/cime_config/buildnml
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def _main_func():
cmd = cmd + " -grid %s -atm_grid %s -lnd_grid %s -rof_grid %s -ocn_grid %s -wav_grid %s" \
% (grid, atm_grid, lnd_grid, rof_grid, ocn_grid, wav_grid)

rc, out, err = run_cmd(cmd, from_dir=confdir, ok_to_fail=True)
rc, out, err = run_cmd(cmd, from_dir=confdir)
expect(rc==0,"Command %s failed rc=%d\nout=%s\nerr=%s"%(cmd,rc,out,err))

# copy drv_in, seq_maps.rc and all *modio* files to rundir
Expand Down
18 changes: 9 additions & 9 deletions scripts/Tools/acme_bisect
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ script is intended to be run by git bisect.
"""

from standard_script_setup import *
from CIME.utils import expect, run_cmd
from CIME.utils import expect, run_cmd_no_fail
from CIME.XML.machines import Machines

import argparse, sys, os, doctest, traceback
Expand Down Expand Up @@ -80,18 +80,18 @@ def acme_bisect(testname, good, bad, testroot, compiler, project, baseline_name,
wait_for_tests = os.path.join(CIME.utils.get_scripts_root(), "Tools", "wait_for_tests")

# Important: we only want to test merges
commits_we_want_to_test = run_cmd("git rev-list %s..%s --merges --first-parent" % (good, bad)).splitlines()
all_commits = run_cmd("git rev-list %s..%s" % (good, bad)).splitlines()
commits_we_want_to_test = run_cmd_no_fail("git rev-list %s..%s --merges --first-parent" % (good, bad)).splitlines()
all_commits = run_cmd_no_fail("git rev-list %s..%s" % (good, bad)).splitlines()
commits_to_skip = set(all_commits) - set(commits_we_want_to_test)
print "Skipping these non-merge commits"
for item in commits_to_skip:
print item

# Basic setup
run_cmd("git bisect start")
run_cmd("git bisect good %s" % good)
run_cmd("git bisect bad %s" % bad)
run_cmd("git bisect skip %s" % " ".join(commits_to_skip))
run_cmd_no_fail("git bisect start")
run_cmd_no_fail("git bisect good %s" % good)
run_cmd_no_fail("git bisect bad %s" % bad)
run_cmd_no_fail("git bisect skip %s" % " ".join(commits_to_skip))

# Formulate the create_test command

Expand All @@ -114,9 +114,9 @@ def acme_bisect(testname, good, bad, testroot, compiler, project, baseline_name,

bisect_cmd += " && %s" % wait_for_tests_cmd

run_cmd("git bisect run sh -c '%s'" % bisect_cmd, ok_to_fail=True, verbose=True)
run_cmd("git bisect run sh -c '%s'" % bisect_cmd, verbose=True)

run_cmd("git bisect reset")
run_cmd_no_fail("git bisect reset")

###############################################################################
def _main_func(description):
Expand Down
8 changes: 4 additions & 4 deletions scripts/Tools/bless_test_results
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ from standard_script_setup import *

import wait_for_tests, compare_namelists, simple_compare
from CIME.system_test import NAMELIST_PHASE
from CIME.utils import run_cmd, expect
from CIME.utils import run_cmd, run_cmd_no_fail, expect
from CIME.XML.machines import Machines

import argparse, sys, os, glob, doctest, time
Expand Down Expand Up @@ -135,7 +135,7 @@ def bless_namelists(test_name, baseline_dir_for_test, testcase_dir_for_test, rep
def bless_history(test_name, baseline_tag, baseline_dir_for_test, testcase_dir_for_test, report_only, force):
###############################################################################
# Get user that test was run as (affects loc of hist files)
acme_root = run_cmd("./xmlquery CESMSCRATCHROOT -value", from_dir=testcase_dir_for_test)
acme_root = run_cmd_no_fail("./xmlquery CESMSCRATCHROOT -value", from_dir=testcase_dir_for_test)

case = os.path.basename(testcase_dir_for_test)
cime_root = CIME.utils.get_cime_root()
Expand All @@ -147,15 +147,15 @@ def bless_history(test_name, baseline_tag, baseline_dir_for_test, testcase_dir_f
(machine_env, compgen, baseline_dir_for_test, case, test_name, run_dir, cprnc_loc, baseline_tag)

check_compare = os.path.join(cime_root, "scripts", "Tools", "component_write_comparefail.pl")
stat, out, _ = run_cmd("%s %s 2>&1" % (check_compare, run_dir), ok_to_fail=True)
stat, out, _ = run_cmd("%s %s 2>&1" % (check_compare, run_dir))

if (stat != 0):
# found diff, offer rebless
print out

if (not report_only and
(force or raw_input("Update this diff (y/n)? ").upper() in ["Y", "YES"])):
stat = run_cmd(compgen_cmd, ok_to_fail=True, verbose=True)[0]
stat = run_cmd(compgen_cmd, verbose=True)[0]
if (stat != 0):
logging.warning("Hist file bless FAILED for test %s" % test_name)
return False, "Bless command failed"
Expand Down
8 changes: 5 additions & 3 deletions scripts/Tools/case.submit
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,11 @@ OR

args = parser.parse_args(args[1:])

CIME.utils.expect(args.prereq is None, "--prereq not currently supported")

CIME.utils.handle_standard_logging_options(args)

return args.caseroot, args.job, args.no_batch, args.prereq
return args.caseroot, args.job, args.no_batch

###############################################################################
def _main_func(description):
Expand All @@ -55,9 +57,9 @@ def _main_func(description):
test_results = doctest.testmod(verbose=True)
sys.exit(1 if test_results.failed > 0 else 0)

caseroot, job, no_batch, prereq = parse_command_line(sys.argv, description)
caseroot, job, no_batch = parse_command_line(sys.argv, description)
with Case(caseroot, read_only=False) as case:
submit(case, job=job, no_batch=no_batch, prereq_jobid=prereq)
submit(case, job=job, no_batch=no_batch)

if __name__ == "__main__":
_main_func(__doc__)
2 changes: 1 addition & 1 deletion scripts/Tools/check_input_data
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ Should be run from case.
"""
from standard_script_setup import *

from CIME.utils import expect, get_model, run_cmd
from CIME.utils import expect, get_model
from CIME.XML.machines import Machines
from CIME.check_input_data import check_input_data, SVN_LOCS
from CIME.case import Case
Expand Down
96 changes: 96 additions & 0 deletions scripts/Tools/code_checker
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
#!/usr/bin/env python

"""
Ensure that all CIME python files are free of errors
and follow the PEP8 standard.
"""

from standard_script_setup import *

from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_python_libs_root

import argparse, sys, os, doctest
from multiprocessing.dummy import Pool as ThreadPool

###############################################################################
def parse_command_line(args, description):
###############################################################################
    """
    Parse the command line for code_checker.

    Returns (dir_to_check, num_procs).
    """
    prog = os.path.basename(args[0])
    usage_text = """\n%s [--verbose]
OR
%s --help
OR
%s --test
\033[1mEXAMPLES:\033[0m
\033[1;32m# Check code \033[0m
> %s
""" % ((prog, ) * 4)

    parser = argparse.ArgumentParser(
        usage=usage_text,
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    CIME.utils.setup_standard_logging_options(parser)

    parser.add_argument("--dir", default=get_python_libs_root(),
                        help="The root directory containing python files to check.")

    parser.add_argument("-j", "--num-procs", type=int, default=8,
                        help="The number of files to check in parallel")

    parsed = parser.parse_args(args[1:])

    CIME.utils.handle_standard_logging_options(parsed)

    return parsed.dir, parsed.num_procs

###############################################################################
def run_pylint(on_file):
###############################################################################
    """
    Run pylint on a single file and report the outcome on stdout.

    Returns True if the file was clean, False otherwise.
    """
    # Disable the Info/Convention/Refactor categories plus a handful of
    # checks this codebase deliberately allows; anything left is an error.
    cmd = "pylint --disable I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement %s" % on_file
    status = run_cmd(cmd)[0]
    clean = (status == 0)
    if clean:
        sys.stdout.write("File %s has no pylint problems\n" % on_file)
    else:
        sys.stdout.write("File %s has pylint problems, please fix\n Use command: %s\n" % (on_file, cmd))
    return clean

###############################################################################
def check_code(dir_to_check, num_procs):
###############################################################################
    """
    Check all python files in the given directory

    Returns True if all files had no problems
    """
    # Pylint won't work right if the imports within the checked files fail,
    # so make the checked tree importable.
    # BUG FIX: the original appended dir_to_check directly with no separator,
    # which corrupted the last existing PYTHONPATH entry (e.g.
    # "/a/b" + "/c/d" -> "/a/b/c/d"). Join with os.pathsep instead.
    if "PYTHONPATH" in os.environ:
        os.environ["PYTHONPATH"] += os.pathsep + dir_to_check
    else:
        os.environ["PYTHONPATH"] = dir_to_check

    # Get list of files to check
    files_to_check = run_cmd_no_fail('find %s -name "*.py"' % dir_to_check).splitlines()

    # Check files in parallel; run_pylint prints per-file results as it goes.
    pool = ThreadPool(num_procs)
    results = pool.map(run_pylint, files_to_check)

    # Clean only if every file passed.
    return False not in results

###############################################################################
def _main_func(description):
###############################################################################
    """Entry point: run doctests under --test, otherwise check the tree
    and exit 0 iff every file is pylint-clean."""
    if "--test" in sys.argv:
        test_results = doctest.testmod(verbose=True)
        sys.exit(1 if test_results.failed > 0 else 0)

    dir_to_check, num_procs = parse_command_line(sys.argv, description)

    all_clean = check_code(dir_to_check, num_procs)
    sys.exit(0 if all_clean else 1)

###############################################################################

if __name__ == "__main__":
    _main_func(__doc__)
2 changes: 1 addition & 1 deletion scripts/Tools/compare_namelists
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter
def _main_func(description):
###############################################################################
if ("--test" in sys.argv):
CIME.utils.run_cmd("python -m doctest %s/compare_namelists.py -v" % CIME.utils.get_python_libs_root(), arg_stdout=None, arg_stderr=None)
CIME.utils.run_cmd_no_fail("python -m doctest %s/compare_namelists.py -v" % CIME.utils.get_python_libs_root(), arg_stdout=None, arg_stderr=None)
return

gold_file, compare_file, case = \
Expand Down
Loading

0 comments on commit 367d47f

Please sign in to comment.