Commit

Reduced double imports
andreas-zeller committed Jan 5, 2025
1 parent c2e3d37 commit 0d54881
Showing 1 changed file with 32 additions and 74 deletions.
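For readability, the consolidated import cells introduced by this commit look roughly as follows when written out as plain Python (a sketch reconstructed from the added lines in the diff below; it assumes the fuzzingbook package is installed):

    # Imports gathered near the top of the notebook; later cells rely on these
    # instead of re-importing the same modules (hence "reduced double imports").
    from typing import List, Tuple, Dict, Any, Optional

    from fuzzingbook.Grammars import Grammar, EXPR_GRAMMAR, reachable_nonterminals, is_valid_grammar, START_SYMBOL
    from fuzzingbook.GrammarFuzzer import GrammarFuzzer, expansion_to_children, DerivationTree, tree_to_string, display_tree, is_nonterminal
    from fuzzingbook.Parser import EarleyParser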
106 changes: 32 additions & 74 deletions notebooks/Alhazen.ipynb
@@ -75,7 +75,7 @@
"metadata": {},
"outputs": [],
"source": [
"from typing import List, Tuple, Dict, Any"
"from typing import List, Tuple, Dict, Any, Optional"
]
},
{
@@ -153,7 +153,9 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.Grammars import Grammar"
" from fuzzingbook.Grammars import Grammar, EXPR_GRAMMAR, reachable_nonterminals, is_valid_grammar, START_SYMBOL\n",
" from fuzzingbook.GrammarFuzzer import GrammarFuzzer, expansion_to_children, DerivationTree, tree_to_string, display_tree, is_nonterminal\n",
" from fuzzingbook.Parser import EarleyParser"
]
},
{
@@ -180,8 +182,7 @@
"\n",
" \"<digit>\":\n",
" [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n",
"}\n",
"START_SYMBOL = \"<start>\""
"}"
]
},
{
@@ -248,8 +249,15 @@
"metadata": {},
"outputs": [],
"source": [
"from enum import Enum\n",
"\n",
"from enum import Enum"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class OracleResult(Enum):\n",
" BUG = \"BUG\"\n",
" NO_BUG = \"NO_BUG\"\n",
@@ -617,8 +625,6 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.GrammarFuzzer import expansion_to_children, DerivationTree\n",
"\n",
"class ExistenceFeature(Feature):\n",
" '''\n",
" This class represents existence features of a grammar. Existence features indicate\n",
@@ -680,7 +686,6 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.GrammarFuzzer import tree_to_string\n",
"from numpy import nanmax, isnan\n",
"\n",
"class NumericInterpretation(Feature):\n",
@@ -762,7 +767,6 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.Grammars import reachable_nonterminals\n",
"from collections import defaultdict\n",
"import re\n",
"\n",
@@ -855,9 +859,6 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.Parser import EarleyParser\n",
"from fuzzingbook.Grammars import Grammar\n",
"import pandas\n",
"from pandas import DataFrame\n",
"\n",
"def collect_features(sample_list: List[str],\n",
@@ -905,8 +906,6 @@
"outputs": [],
"source": [
"# TODO: handle multiple trees\n",
"from fuzzingbook.Parser import EarleyParser\n",
"\n",
"def compute_feature_values(sample: str, grammar: Grammar, features: List[Feature]) -> Dict[str, float]:\n",
" '''\n",
" Extracts all feature values from an input.\n",
@@ -952,10 +951,6 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.Grammars import EXPR_GRAMMAR\n",
"\n",
"from fuzzingbook.GrammarFuzzer import GrammarFuzzer\n",
"from fuzzingbook.GrammarFuzzer import tree_to_string, display_tree\n",
"import random\n",
"\n",
"# For this example, fix the random seed so that the produced output is deterministic\n",
@@ -1023,9 +1018,6 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.Grammars import is_nonterminal\n",
"from fuzzingbook.GrammarFuzzer import tree_to_string\n",
"\n",
"# Then, recursively iterate through the derivation tree and for each non-terminal,\n",
"# add the derived word to the grammar\n",
"\n",
@@ -1050,16 +1042,8 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.GrammarFuzzer import GrammarFuzzer\n",
"from fuzzingbook.GrammarFuzzer import display_tree, tree_to_string\n",
"from fuzzingbook.Grammars import EXPR_GRAMMAR, Grammar\n",
"\n",
"import random\n",
"import copy\n",
"\n",
"from fuzzingbook.Parser import EarleyParser\n",
"from fuzzingbook.GrammarFuzzer import display_tree, tree_to_string\n",
"\n",
"START_SYMBOL = \"<start>\"\n",
"\n",
"def transform_grammar(sample: str,\n",
@@ -1115,10 +1099,9 @@
"metadata": {},
"outputs": [],
"source": [
"import sklearn\n",
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn.feature_extraction import DictVectorizer\n",
"\n",
"import graphviz"
"from sklearn.feature_extraction import DictVectorizer"
]
},
{
@@ -1223,9 +1206,15 @@
"metadata": {},
"outputs": [],
"source": [
"import graphviz\n",
"import sklearn\n",
"\n",
"import graphviz"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def show_decision_tree(clf, feature_names):\n",
" dot_data = sklearn.tree.export_graphviz(clf, out_file=None, \n",
" feature_names=feature_names,\n",
@@ -1351,9 +1340,6 @@
"metadata": {},
"outputs": [],
"source": [
"from sklearn.feature_extraction import DictVectorizer\n",
"import pandas\n",
"\n",
"# Features for each input, one dict per input\n",
"features = [\n",
" {'function-sqrt': 1, 'function-cos': 0, 'function-sin': 0, 'number': -900},\n",
@@ -1387,9 +1373,6 @@
"feature_names = ['function-sqrt', 'function-cos', 'function-sin', 'number']\n",
"X_data = pandas.DataFrame.from_records(features)\n",
"\n",
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn import tree\n",
"\n",
"# Fix the random state to produce a deterministic result (for illustration purposes only)\n",
"clf = DecisionTreeClassifier(random_state=10)\n",
"\n",
@@ -1399,7 +1382,6 @@
"# Train with Pandas Dataframe\n",
"clf = clf.fit(X_data, oracle)\n",
"\n",
"import graphviz\n",
"dot_data = sklearn.tree.export_graphviz(clf, out_file=None, \n",
" feature_names=feature_names,\n",
" class_names=[\"BUG\", \"NO BUG\"], \n",
@@ -1688,9 +1670,7 @@
"source": [
"import logging\n",
"from pathlib import Path\n",
"\n",
"import numpy as np\n",
"from typing import List, Optional\n",
"\n",
"def tree_to_paths(tree, features: List[Feature]):\n",
" logging.info(\"Extracting requirements from tree ...\")\n",
@@ -1894,7 +1874,6 @@
"metadata": {},
"outputs": [],
"source": [
"import pandas\n",
"x = pandas.DataFrame.from_records(features)\n",
"bounds = pandas.DataFrame([{'feature': c, 'min': x[c].min(), 'max': x[c].max()}\n",
" for c in feature_names],\n",
@@ -1955,8 +1934,6 @@
"metadata": {},
"outputs": [],
"source": [
"import pandas\n",
"\n",
"def extracting_prediction_paths(clf, feature_names, data):\n",
" \n",
" # determine the bounds\n",
@@ -2032,30 +2009,28 @@
"outputs": [],
"source": [
"import string\n",
"from fuzzingbook.Grammars import Grammar, is_valid_grammar\n",
"START_SYMBOL = \"<start>\"\n",
"\n",
"SPECIFICATION: Grammar = {\n",
" \"<start>\":\n",
" [\"<req_list>\"],\n",
" \n",
"\n",
" \"<req_list>\": \n",
" [\"<req>\", \"<req>\"\", \"\"<req_list>\"],\n",
"\n",
" \"<req>\":\n",
" [\"<feature>\"\" \"\"<quant>\"\" \"\"<num>\"],\n",
" \n",
"\n",
" \"<feature>\": [\"exists(<string>)\",\n",
" \"num(<string>)\",\n",
" # currently not used\n",
" \"char(<string>)\",\n",
" \"length(<string>)\"], \n",
" \n",
"\n",
" \"<quant>\":\n",
" [\"<\", \">\", \"<=\", \">=\"],\n",
" \n",
"\n",
" \"<num>\": [\"-<value>\", \"<value>\"],\n",
" \n",
"\n",
" \"<value>\":\n",
" [\"<integer>.<integer>\",\n",
" \"<integer>\"],\n",
@@ -2065,12 +2040,12 @@
"\n",
" \"<digit>\":\n",
" [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"],\n",
" \n",
"\n",
" '<string>': ['<letters>'],\n",
" '<letters>': ['<letter><letters>', '<letter>'],\n",
" '<letter>': list(string.ascii_letters + string.digits + string.punctuation)\n",
"}\n",
" \n",
"\n",
"assert is_valid_grammar(SPECIFICATION, START_SYMBOL) == True"
]
},
@@ -2094,10 +2069,6 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.GrammarFuzzer import GrammarFuzzer\n",
"from fuzzingbook.Grammars import EXPR_GRAMMAR, Expansion\n",
"from fuzzingbook.Parser import EarleyParser, tree_to_string\n",
"\n",
"g = GrammarFuzzer(SPECIFICATION, START_SYMBOL ,max_nonterminals= 100)\n",
"earley = EarleyParser(SPECIFICATION)\n",
"for i in range(10):\n",
@@ -2163,9 +2134,6 @@
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"from fuzzingbook.GrammarFuzzer import DerivationTree\n",
"\n",
"class SpecRequirement:\n",
" '''\n",
" This class represents a requirement for a new input sample that should be generated.\n",
@@ -2383,11 +2351,6 @@
"from typing import List\n",
"from itertools import chain\n",
"\n",
"from fuzzingbook.Parser import EarleyParser\n",
"from fuzzingbook.GrammarFuzzer import DerivationTree, all_terminals, Grammar, tree_to_string\n",
"from fuzzingbook.Grammars import Grammar, nonterminals, opts, is_valid_grammar\n",
"from fuzzingbook.Grammars import reachable_nonterminals, unreachable_nonterminals\n",
"\n",
"\n",
"def best_trees(forest, spec):\n",
" samples = [tree_to_string(tree) for tree in forest]\n",
@@ -2531,8 +2494,6 @@
"metadata": {},
"outputs": [],
"source": [
"from fuzzingbook.GrammarFuzzer import GrammarFuzzer\n",
"\n",
"def generate_samples_random(grammar, new_input_specifications, num):\n",
" f = GrammarFuzzer(grammar ,max_nonterminals=50, log=False)\n",
" data = []\n",
@@ -2584,9 +2545,6 @@
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"import pandas\n",
"\n",
"GENERATOR_TIMEOUT = 10 # timeout in seconds\n",
"\n",
"class Alhazen:\n",