diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..02b1348 --- /dev/null +++ b/404.html @@ -0,0 +1,620 @@ + + + +
+ + + + + + + + + + + + + + +Config file to define all hyperparameters.
+1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +10 +11 +12 +13 +14 +15 |
|
TensorflowExtension
+
+
+¶
+ Bases: Extension
Connect Keras to OpenML-Python.
+ + + + + + +openml_tensorflow/extension.py
55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 + 100 + 101 + 102 + 103 + 104 + 105 + 106 + 107 + 108 + 109 + 110 + 111 + 112 + 113 + 114 + 115 + 116 + 117 + 118 + 119 + 120 + 121 + 122 + 123 + 124 + 125 + 126 + 127 + 128 + 129 + 130 + 131 + 132 + 133 + 134 + 135 + 136 + 137 + 138 + 139 + 140 + 141 + 142 + 143 + 144 + 145 + 146 + 147 + 148 + 149 + 150 + 151 + 152 + 153 + 154 + 155 + 156 + 157 + 158 + 159 + 160 + 161 + 162 + 163 + 164 + 165 + 166 + 167 + 168 + 169 + 170 + 171 + 172 + 173 + 174 + 175 + 176 + 177 + 178 + 179 + 180 + 181 + 182 + 183 + 184 + 185 + 186 + 187 + 188 + 189 + 190 + 191 + 192 + 193 + 194 + 195 + 196 + 197 + 198 + 199 + 200 + 201 + 202 + 203 + 204 + 205 + 206 + 207 + 208 + 209 + 210 + 211 + 212 + 213 + 214 + 215 + 216 + 217 + 218 + 219 + 220 + 221 + 222 + 223 + 224 + 225 + 226 + 227 + 228 + 229 + 230 + 231 + 232 + 233 + 234 + 235 + 236 + 237 + 238 + 239 + 240 + 241 + 242 + 243 + 244 + 245 + 246 + 247 + 248 + 249 + 250 + 251 + 252 + 253 + 254 + 255 + 256 + 257 + 258 + 259 + 260 + 261 + 262 + 263 + 264 + 265 + 266 + 267 + 268 + 269 + 270 + 271 + 272 + 273 + 274 + 275 + 276 + 277 + 278 + 279 + 280 + 281 + 282 + 283 + 284 + 285 + 286 + 287 + 288 + 289 + 290 + 291 + 292 + 293 + 294 + 295 + 296 + 297 + 298 + 299 + 300 + 301 + 302 + 303 + 304 + 305 + 306 + 307 + 308 + 309 + 310 + 311 + 312 + 313 + 314 + 315 + 316 + 317 + 318 + 319 + 320 + 321 + 322 + 323 + 324 + 325 + 326 + 327 + 328 + 329 + 330 + 331 + 332 + 333 + 334 + 335 + 336 + 337 + 338 + 339 + 340 + 341 + 342 + 343 + 344 + 345 + 346 + 347 + 348 + 349 + 350 + 351 + 352 + 353 + 354 + 355 + 356 + 357 + 358 + 359 + 360 + 361 + 362 + 363 + 364 + 365 + 366 + 367 + 368 + 369 + 370 + 371 + 372 + 373 + 374 + 375 + 376 + 377 + 378 + 379 + 380 + 381 + 382 + 383 + 384 + 385 + 386 + 387 + 388 + 389 + 390 + 391 + 392 + 393 + 394 + 395 
+ 396 + 397 + 398 + 399 + 400 + 401 + 402 + 403 + 404 + 405 + 406 + 407 + 408 + 409 + 410 + 411 + 412 + 413 + 414 + 415 + 416 + 417 + 418 + 419 + 420 + 421 + 422 + 423 + 424 + 425 + 426 + 427 + 428 + 429 + 430 + 431 + 432 + 433 + 434 + 435 + 436 + 437 + 438 + 439 + 440 + 441 + 442 + 443 + 444 + 445 + 446 + 447 + 448 + 449 + 450 + 451 + 452 + 453 + 454 + 455 + 456 + 457 + 458 + 459 + 460 + 461 + 462 + 463 + 464 + 465 + 466 + 467 + 468 + 469 + 470 + 471 + 472 + 473 + 474 + 475 + 476 + 477 + 478 + 479 + 480 + 481 + 482 + 483 + 484 + 485 + 486 + 487 + 488 + 489 + 490 + 491 + 492 + 493 + 494 + 495 + 496 + 497 + 498 + 499 + 500 + 501 + 502 + 503 + 504 + 505 + 506 + 507 + 508 + 509 + 510 + 511 + 512 + 513 + 514 + 515 + 516 + 517 + 518 + 519 + 520 + 521 + 522 + 523 + 524 + 525 + 526 + 527 + 528 + 529 + 530 + 531 + 532 + 533 + 534 + 535 + 536 + 537 + 538 + 539 + 540 + 541 + 542 + 543 + 544 + 545 + 546 + 547 + 548 + 549 + 550 + 551 + 552 + 553 + 554 + 555 + 556 + 557 + 558 + 559 + 560 + 561 + 562 + 563 + 564 + 565 + 566 + 567 + 568 + 569 + 570 + 571 + 572 + 573 + 574 + 575 + 576 + 577 + 578 + 579 + 580 + 581 + 582 + 583 + 584 + 585 + 586 + 587 + 588 + 589 + 590 + 591 + 592 + 593 + 594 + 595 + 596 + 597 + 598 + 599 + 600 + 601 + 602 + 603 + 604 + 605 + 606 + 607 + 608 + 609 + 610 + 611 + 612 + 613 + 614 + 615 + 616 + 617 + 618 + 619 + 620 + 621 + 622 + 623 + 624 + 625 + 626 + 627 + 628 + 629 + 630 + 631 + 632 + 633 + 634 + 635 + 636 + 637 + 638 + 639 + 640 + 641 + 642 + 643 + 644 + 645 + 646 + 647 + 648 + 649 + 650 + 651 + 652 + 653 + 654 + 655 + 656 + 657 + 658 + 659 + 660 + 661 + 662 + 663 + 664 + 665 + 666 + 667 + 668 + 669 + 670 + 671 + 672 + 673 + 674 + 675 + 676 + 677 + 678 + 679 + 680 + 681 + 682 + 683 + 684 + 685 + 686 + 687 + 688 + 689 + 690 + 691 + 692 + 693 + 694 + 695 + 696 + 697 + 698 + 699 + 700 + 701 + 702 + 703 + 704 + 705 + 706 + 707 + 708 + 709 + 710 + 711 + 712 + 713 + 714 + 715 + 716 + 717 + 718 + 719 + 720 + 721 + 722 + 723 + 724 + 725 + 726 + 727 + 728 + 
729 + 730 + 731 + 732 + 733 + 734 + 735 + 736 + 737 + 738 + 739 + 740 + 741 + 742 + 743 + 744 + 745 + 746 + 747 + 748 + 749 + 750 + 751 + 752 + 753 + 754 + 755 + 756 + 757 + 758 + 759 + 760 + 761 + 762 + 763 + 764 + 765 + 766 + 767 + 768 + 769 + 770 + 771 + 772 + 773 + 774 + 775 + 776 + 777 + 778 + 779 + 780 + 781 + 782 + 783 + 784 + 785 + 786 + 787 + 788 + 789 + 790 + 791 + 792 + 793 + 794 + 795 + 796 + 797 + 798 + 799 + 800 + 801 + 802 + 803 + 804 + 805 + 806 + 807 + 808 + 809 + 810 + 811 + 812 + 813 + 814 + 815 + 816 + 817 + 818 + 819 + 820 + 821 + 822 + 823 + 824 + 825 + 826 + 827 + 828 + 829 + 830 + 831 + 832 + 833 + 834 + 835 + 836 + 837 + 838 + 839 + 840 + 841 + 842 + 843 + 844 + 845 + 846 + 847 + 848 + 849 + 850 + 851 + 852 + 853 + 854 + 855 + 856 + 857 + 858 + 859 + 860 + 861 + 862 + 863 + 864 + 865 + 866 + 867 + 868 + 869 + 870 + 871 + 872 + 873 + 874 + 875 + 876 + 877 + 878 + 879 + 880 + 881 + 882 + 883 + 884 + 885 + 886 + 887 + 888 + 889 + 890 + 891 + 892 + 893 + 894 + 895 + 896 + 897 + 898 + 899 + 900 + 901 + 902 + 903 + 904 + 905 + 906 + 907 + 908 + 909 + 910 + 911 + 912 + 913 + 914 + 915 + 916 + 917 + 918 + 919 + 920 + 921 + 922 + 923 + 924 + 925 + 926 + 927 + 928 + 929 + 930 + 931 + 932 + 933 + 934 + 935 + 936 + 937 + 938 + 939 + 940 + 941 + 942 + 943 + 944 + 945 + 946 + 947 + 948 + 949 + 950 + 951 + 952 + 953 + 954 + 955 + 956 + 957 + 958 + 959 + 960 + 961 + 962 + 963 + 964 + 965 + 966 + 967 + 968 + 969 + 970 + 971 + 972 + 973 + 974 + 975 + 976 + 977 + 978 + 979 + 980 + 981 + 982 + 983 + 984 + 985 + 986 + 987 + 988 + 989 + 990 + 991 + 992 + 993 + 994 + 995 + 996 + 997 + 998 + 999 +1000 +1001 +1002 +1003 +1004 +1005 +1006 +1007 +1008 +1009 +1010 +1011 +1012 +1013 +1014 +1015 +1016 +1017 +1018 +1019 +1020 +1021 +1022 +1023 +1024 +1025 +1026 +1027 +1028 +1029 +1030 +1031 +1032 +1033 +1034 +1035 +1036 +1037 +1038 +1039 +1040 +1041 +1042 +1043 +1044 +1045 +1046 +1047 +1048 +1049 +1050 +1051 +1052 +1053 +1054 +1055 +1056 +1057 +1058 +1059 +1060 +1061 
+1062 +1063 +1064 +1065 +1066 +1067 +1068 +1069 +1070 +1071 +1072 +1073 +1074 +1075 +1076 +1077 +1078 +1079 +1080 +1081 +1082 +1083 +1084 +1085 +1086 +1087 +1088 +1089 +1090 +1091 +1092 +1093 +1094 +1095 +1096 +1097 +1098 +1099 +1100 +1101 +1102 +1103 +1104 +1105 +1106 +1107 +1108 +1109 +1110 +1111 +1112 +1113 +1114 |
|
can_handle_flow(flow)
+
+
+ classmethod
+
+
+¶Check whether a given flow describes a Keras neural network.
+This is done by parsing the external_version
field.
flow : OpenMLFlow
+bool
+ +openml_tensorflow/extension.py
61 +62 +63 +64 +65 +66 +67 +68 +69 +70 +71 +72 +73 +74 +75 |
|
can_handle_model(model)
+
+
+ classmethod
+
+
+¶Check whether a model is an instance of tf.models.Model
.
model : Any
+bool
+ +openml_tensorflow/extension.py
77 +78 +79 +80 +81 +82 +83 +84 +85 +86 +87 +88 +89 |
|
check_if_model_fitted(model)
+
+¶Returns True/False denoting if the model has already been fitted/trained +Parameters
+model : Any +Returns
+bool
+ +openml_tensorflow/extension.py
1106 +1107 +1108 +1109 +1110 +1111 +1112 +1113 +1114 |
|
compile_additional_information(task, additional_information)
+
+¶Compiles additional information provided by the extension during the runs into a final +set of files.
+task : OpenMLTask + The task the model was run on. +additional_information: List[Tuple[int, int, Any]] + A list of (fold, repetition, additional information) tuples obtained during training.
+files : Dict[str, Tuple[str, str]] + A dictionary of files with their file name and contents.
+ +openml_tensorflow/extension.py
939 +940 +941 +942 +943 +944 +945 +946 +947 +948 +949 +950 +951 +952 +953 +954 +955 +956 +957 +958 +959 |
|
create_setup_string(model)
+
+¶Create a string which can be used to reinstantiate the given model.
+model : Any
+str
+ +openml_tensorflow/extension.py
296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 |
|
flow_to_model(flow, initialize_with_defaults=False)
+
+¶Initializes a Keras model based on a flow.
+flow : mixed + the object to deserialize (can be flow object, or any serialized + parameter value that is accepted by)
+ + +If this flag is set, the hyperparameter values of flows will be +ignored and a flow with its defaults is returned.
+mixed
+ +openml_tensorflow/extension.py
94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 |
|
get_version_information()
+
+¶List versions of libraries required by the flow.
+Libraries listed are Python
, tensorflow
, numpy
and scipy
.
List
+ +openml_tensorflow/extension.py
273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 |
|
instantiate_model_from_hpo_class(model, trace_iteration)
+
+¶Instantiate a base_estimator
which can be searched over by the hyperparameter
+optimization model (UNUSED)
model : Any + A hyperparameter optimization model which defines the model to be instantiated. +trace_iteration : OpenMLTraceIteration + Describing the hyperparameter settings to instantiate.
+Any
+ +openml_tensorflow/extension.py
1085 +1086 +1087 +1088 +1089 +1090 +1091 +1092 +1093 +1094 +1095 +1096 +1097 +1098 +1099 +1100 +1101 +1102 +1103 +1104 +1105 |
|
is_estimator(model)
+
+¶Check whether the given model is a Keras neural network.
+This function is only required for backwards compatibility and will be removed in the +near future.
+model : Any
+bool
+ +openml_tensorflow/extension.py
651 +652 +653 +654 +655 +656 +657 +658 +659 +660 +661 +662 +663 +664 +665 |
|
model_to_flow(model)
+
+¶Transform a Keras model to a flow for uploading it to OpenML.
+model : Any
+OpenMLFlow
+ +openml_tensorflow/extension.py
216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 |
|
obtain_parameter_values(flow, model=None)
+
+¶Extracts all parameter settings required for the flow from the model.
+If no explicit model is provided, the parameters will be extracted from flow.model
+instead.
flow : OpenMLFlow + OpenMLFlow object (containing flow ids, i.e., it has to be downloaded from the server)
+ + +The model from which to obtain the parameter values. Must match the flow signature.
+If None, use the model specified in OpenMLFlow.model
.
list
+ A list of dicts, where each dict has the following entries:
+ - oml:name
: str: The OpenML parameter name
+ - oml:value
: mixed: A representation of the parameter value
+ - oml:component
: int: flow id to which the parameter belongs
openml_tensorflow/extension.py
961 + 962 + 963 + 964 + 965 + 966 + 967 + 968 + 969 + 970 + 971 + 972 + 973 + 974 + 975 + 976 + 977 + 978 + 979 + 980 + 981 + 982 + 983 + 984 + 985 + 986 + 987 + 988 + 989 + 990 + 991 + 992 + 993 + 994 + 995 + 996 + 997 + 998 + 999 +1000 +1001 +1002 +1003 +1004 +1005 +1006 +1007 +1008 +1009 +1010 +1011 +1012 +1013 +1014 +1015 +1016 +1017 +1018 +1019 +1020 +1021 +1022 +1023 +1024 +1025 +1026 +1027 +1028 +1029 +1030 +1031 +1032 +1033 +1034 +1035 +1036 +1037 +1038 +1039 +1040 +1041 +1042 +1043 +1044 +1045 +1046 +1047 +1048 +1049 +1050 +1051 |
|
seed_model(model, seed=None)
+
+¶Not applied for Keras, since there are no random states in Keras.
+model : keras model + The model to be seeded +seed : int + The seed to initialize the RandomState with. Unseeded subcomponents + will be seeded with a random number from the RandomState.
+Any
+ +openml_tensorflow/extension.py
667 +668 +669 +670 +671 +672 +673 +674 +675 +676 +677 +678 +679 +680 +681 +682 +683 +684 |
|
The docker container has the latest version of OpenML-Tensorflow downloaded and pre-installed. It can be used to run TensorFlow Deep Learning analysis on OpenML datasets. +This document contains information about:
+Usage: how to use the image
+These are the steps to use the image:
+1 |
|
1 |
|
1 |
|
1 |
|
1 |
|
This folder contains examples of how to use the openml-tensorflow
extension for different datasets and models.
%matplotlib inline
+
An example of a Tensorflow network that classifies Meta Album images.
+import openml
+import openml_tensorflow
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+import tensorflow as tf
+from tensorflow.keras import layers, models
+
openml.config.apikey = 'KEY' # Paste your API key here
+
openml_tensorflow.config.epoch = 1 # small epoch for test runs
+
+datagen = ImageDataGenerator()
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.dir = openml.config.get_cache_directory()+'/datasets/44312/PNU_Micro/images/'
+openml_tensorflow.config.x_col = "FILE_NAME"
+openml_tensorflow.config.y_col = 'encoded_labels'
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.batch_size = 32
+openml_tensorflow.config.class_mode = "categorical"
+
+# Perform cross-validation during training
+openml_tensorflow.config.perform_validation = True
+openml_tensorflow.config.validation_split = 0.1
+openml_tensorflow.config.datagen_valid = ImageDataGenerator()
+
+IMG_SIZE = (128, 128)
+IMG_SHAPE = IMG_SIZE + (3,)
+
+# Example tensorflow image classification model.
+model = models.Sequential()
+model.add(layers.Conv2D(128, (3, 3), activation='relu', input_shape=IMG_SHAPE))
+model.add(layers.MaxPooling2D((2, 2)))
+model.add(layers.Conv2D(64, (3, 3), activation='relu'))
+model.add(layers.MaxPooling2D((2, 2)))
+model.add(layers.Conv2D(64, (3, 3), activation='relu'))
+model.add(layers.Flatten())
+model.add(layers.Dense(64, activation='relu'))
+model.add(layers.Dense(84, activation='relu'))
+model.add(layers.Dense(19, activation='softmax')) # Adjust output size
+model.compile(optimizer='adam',
+ loss='categorical_crossentropy',
+ metrics=['AUC'])
+
# Download the OpenML task for the Meta_Album_PNU_Micro dataset.
+task = openml.tasks.get_task(362071)
+
+# Run the Keras model on the task (requires an API key).
+run = openml.runs.run_model_on_task(model, task, avoid_duplicate_runs=False)
+
If you want to publish the run with the ONNX file, +then you must call openml_tensorflow.add_onnx_to_run() immediately before run.publish(). +When you publish, the ONNX file of the most recently trained model is uploaded. +Be careful not to call this function if another run_model_on_task happens in between, +because at publish time only the model from the most recent run_model_on_task call is uploaded.
+run = openml_tensorflow.add_onnx_to_run(run)
+
+run.publish()
+
+print('URL for run: %s/run/%d?api_key=%s' % (openml.config.server, run.run_id, openml.config.apikey))
+
Optional: Visualize model in netron
+from urllib.request import urlretrieve
+
+published_run = openml.runs.get_run(run.run_id)
+url = 'https://api.openml.org/data/download/{}/model.onnx'.format(published_run.output_files['onnx_model'])
+
+file_path, _ = urlretrieve(url, 'model.onnx')
+
+import netron
+# Visualize the ONNX model using Netron
+netron.start(file_path)
+
%matplotlib inline
+
An example of a tensorflow network that classifies IndoorScenes images into 67
classes using tensorflow Sequential
model.
import os
+import logging
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+os.environ['ABSL_MIN_LOG_LEVEL'] = '3'
+# logging.getLogger('tensorflow').setLevel(logging.ERROR)
+
+import tensorflow
+
+import openml
+import openml_tensorflow
+import pandas as pd
+from sklearn import preprocessing
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+import tensorflow as tf
+from tensorflow.keras import datasets, layers, models
+import logging
+from keras import regularizers
+
+import warnings
+warnings.simplefilter(action='ignore', category=FutureWarning)
+
+import pandas as pd
+pd.options.mode.chained_assignment = None # default='warn'
+
Enable logging in order to observe the progress while running the example.
+openml.config.logger.setLevel(logging.DEBUG)
+
openml.config.apikey = 'KEY'
+
openml_tensorflow.config.epoch = 1 # small epoch for test runs
+
+IMG_SIZE = (128, 128)
+IMG_SHAPE = IMG_SIZE + (3,)
+base_learning_rate = 0.0001
+
+# Toy example
+datagen = ImageDataGenerator(
+ rotation_range=25,
+ width_shift_range=0.01,
+ height_shift_range=0.01,
+ brightness_range=(0.9, 1.1),
+ zoom_range=0.1,
+ horizontal_flip=True,
+ vertical_flip=True,
+)
+
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.dir = openml.config.get_cache_directory()+'/datasets/45936/Images/'
+openml_tensorflow.config.x_col = "Filename"
+openml_tensorflow.config.y_col = 'Class_encoded'
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.batch_size = 32
+openml_tensorflow.config.class_mode = "categorical"
+openml_tensorflow.config.perform_validation = True
+
+kwargs = {
+ 'callbacks': tf.keras.callbacks.EarlyStopping(monitor='auc', patience=5),
+ 'verbose': 2
+}
+openml_tensorflow.config.kwargs = kwargs
+
Large CNN
+IMG_SIZE = 128
+NUM_CLASSES = 67
+
+# Example tensorflow image classification model. You can do better :)
+model = models.Sequential()
+
+# 4 VGG-like CNN blocks
+model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same',
+ input_shape=(IMG_SIZE, IMG_SIZE, 3)))
+model.add(layers.BatchNormalization())
+model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
+model.add(layers.BatchNormalization())
+model.add(layers.MaxPooling2D((2, 2)))
+model.add(layers.Dropout(0.2))
+
+
+model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
+model.add(layers.BatchNormalization())
+model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
+model.add(layers.BatchNormalization())
+model.add(layers.MaxPooling2D((2, 2)))
+model.add(layers.Dropout(0.3))
+
+
+model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
+model.add(layers.BatchNormalization())
+model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
+model.add(layers.BatchNormalization())
+model.add(layers.MaxPooling2D((2, 2)))
+model.add(layers.Dropout(0.4))
+
+model.add(layers.Conv2D(256, (3, 3), activation='relu', padding='same'))
+model.add(layers.BatchNormalization())
+model.add(layers.Conv2D(256, (3, 3), activation='relu', padding='same'))
+model.add(layers.BatchNormalization())
+model.add(layers.MaxPooling2D((2, 2)))
+model.add(layers.Dropout(0.5))
+
+# Pooling and one dense layer + output layer
+model.add(layers.GlobalAveragePooling2D())
+model.add(layers.Dense(192, activation='relu', kernel_regularizer=regularizers.L2(1e-4)))
+model.add(layers.BatchNormalization())
+model.add(layers.Dropout(0.25))
+model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
+
+model.compile(
+ optimizer='adam',
+ loss='categorical_crossentropy',
+ metrics=['AUC'])
+
# Download the OpenML task for the Indoorscenes dataset.
+
+# task = openml.tasks.get_task(362065)
+task = openml.tasks.get_task(362070)
+
+# Run the Keras model on the task (requires an API key).
+run = openml.runs.run_model_on_task(model, task, avoid_duplicate_runs=False)
+
+# If you want to publish the run with the onnx file,
+# then you must call openml_tensorflow.add_onnx_to_run() immediately before run.publish().
+# When you publish, onnx file of last trained model is uploaded.
+# Careful to not call this function when another run_model_on_task is called in between,
+# as during publish later, only the last trained model (from last run_model_on_task call) is uploaded.
+run = openml_tensorflow.add_onnx_to_run(run)
+
+run.publish()
+
+print('URL for run: %s/run/%d?api_key=%s' % (openml.config.server, run.run_id, openml.config.apikey))
+
# Visualize model in netron
+
+from urllib.request import urlretrieve
+
+published_run = openml.runs.get_run(run.run_id)
+url = 'https://api.openml.org/data/download/{}/model.onnx'.format(published_run.output_files['onnx_model'])
+
+file_path, _ = urlretrieve(url, 'model.onnx')
+
+import netron
+# Visualize the ONNX model using Netron
+netron.start(file_path)
+
%matplotlib inline
+
This example demonstrates how to build and train a TensorFlow network that classifies images from the Meta Album Images dataset on OpenML.
+The model runs independently and can be used as a sanity check to compare results generated using openml.runs.run_model_on_task
.
import openml
+import openml_tensorflow
+
+import os
+import pandas as pd
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+import tensorflow as tf
+from tensorflow.keras import datasets, layers, models
+import logging
+
+from sklearn.model_selection import train_test_split
+
+import warnings
+warnings.simplefilter(action='ignore', category=FutureWarning)
+warnings.simplefilter(action='ignore', category=UserWarning)
+warnings.simplefilter(action='ignore', category=RuntimeWarning)
+warnings.simplefilter(action='ignore')
+
+import pandas as pd
+pd.options.mode.chained_assignment = None # default='warn'
+
Enable logging in order to observe the progress while running the example.
+openml.config.logger.setLevel(logging.DEBUG)
+
openml.config.apikey = 'KEY'
+
openml_tensorflow.config.epoch = 1 # small epoch for test runs
+
+IMG_SIZE = (128, 128)
+IMG_SHAPE = IMG_SIZE + (3,)
+base_learning_rate = 0.0001
+
+datagen = ImageDataGenerator()
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.dir = openml.config.get_cache_directory()+'/datasets/44312/PNU_Micro/images/'
+openml_tensorflow.config.x_col = "FILE_NAME"
+openml_tensorflow.config.y_col = 'encoded_labels'
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.batch_size = 32
+openml_tensorflow.config.class_mode = "categorical"
+
+data_augmentation = tf.keras.Sequential([
+ layers.RandomFlip("horizontal_and_vertical"),
+ layers.RandomRotation(0.2),
+])
+
+# Example tensorflow image classification model. You can do better :)
+model = models.Sequential()
+model.add(layers.Conv2D(128, (3, 3), activation='relu', input_shape=(128, 128, 3)))
+model.add(layers.MaxPooling2D((2, 2)))
+model.add(layers.Conv2D(64, (3, 3), activation='relu'))
+model.add(layers.MaxPooling2D((2, 2)))
+model.add(layers.Conv2D(64, (3, 3), activation='relu'))
+model.add(layers.Flatten())
+model.add(layers.Dense(64, activation='relu'))
+model.add(layers.Dense(84, activation='relu'))
+model.add(layers.Dense(67, activation='softmax')) # Adjust output size
+model.compile(optimizer='adam',
+ loss='categorical_crossentropy',
+ metrics=['accuracy'])
+
+# task = openml.tasks.get_task(362071)
+
+
+
+openml_dataset = openml.datasets.get_dataset(45923, download_all_files=True)
+df, *_ = openml_dataset.get_data()
+
+# Local directory with the images
+data_dir = os.path.join(os.path.dirname(openml_dataset.data_file), "Images")
+
+# Splitting the data
+df_train, df_valid = train_test_split(df, test_size=0.1, random_state=42, stratify=df['Class_name'])
+
+datagen_train = ImageDataGenerator() # You can add data augmentation options here.
+train_generator = datagen_train.flow_from_dataframe(dataframe=df_train,
+ directory=data_dir,
+ x_col="Filename", y_col="Class_encoded",
+ class_mode="categorical",
+ target_size=(IMG_SIZE, IMG_SIZE),
+ batch_size=32)
+
+history = model.fit(train_generator, steps_per_epoch=openml_tensorflow.config.step_per_epoch,
+ batch_size=openml_tensorflow.config.batch_size, epochs=openml_tensorflow.config.epoch, verbose=1)
+learning_curves = history.history
+
%matplotlib inline
+
An example of a TensorFlow network that classifies Indoor Scenes images using a pre-trained EfficientNetV2B3 model. +Here the last few layers of the pre-trained model are fine-tuned while the remaining layers are frozen.
+import logging
+import os
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+os.environ['ABSL_MIN_LOG_LEVEL'] = '3'
+logging.getLogger('tensorflow').setLevel(logging.ERROR)
+
+import openml
+import openml_tensorflow
+
+import pandas as pd
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+import tensorflow as tf
+from tensorflow.keras import layers, models
+from tensorflow.keras.applications import EfficientNetV2B3
+
+import warnings
+warnings.simplefilter(action='ignore')
+
+import pandas as pd
+pd.options.mode.chained_assignment = None # default='warn'
+
Enable logging in order to observe the progress while running the example.
+openml.config.logger.setLevel(logging.DEBUG)
+
openml.config.apikey = 'KEY'
+
openml_tensorflow.config.epoch = 1 # small epoch for test runs
+
+IMG_SIZE = (128, 128)
+IMG_SHAPE = IMG_SIZE + (3,)
+base_learning_rate = 0.0001
+
+# datagen = ImageDataGenerator(
+# rotation_range=20, # Degree range for random rotations
+# width_shift_range=0.2, # Fraction of total width for random horizontal shifts
+# height_shift_range=0.2, # Fraction of total height for random vertical shifts
+# shear_range=0.2, # Shear intensity (shear angle in radians)
+# zoom_range=0.2, # Random zoom range
+# horizontal_flip=True, # Randomly flip inputs horizontally
+# fill_mode='nearest',
+# validation_split=0.2
+# )
+
+datagen = ImageDataGenerator()
+openml_tensorflow.config.datagen = datagen
+
+openml_tensorflow.config.dir = openml.config.get_cache_directory()+'/datasets/45923/Images/'
+# openml_tensorflow.config.dir = 'dataset/Images'
+openml_tensorflow.config.x_col = "Filename"
+openml_tensorflow.config.y_col = 'Class_encoded'
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.batch_size = 2
+openml_tensorflow.config.class_mode = "categorical"
+openml_tensorflow.config.perform_validation = True
+
+kwargs = {
+ 'callbacks': tf.keras.callbacks.EarlyStopping(monitor='loss', patience=0),
+ 'verbose': 2
+}
+openml_tensorflow.config.kwargs = kwargs
+
+IMG_SIZE = 128
+NUM_CLASSES = 67
+
+def build_model():
+
+ dropout_rate = 0.6
+
+ base = EfficientNetV2B3(
+ include_top=False,
+ weights="imagenet",
+ pooling=None)
+ count = 0
+ count_trainable = 0
+ for layer in base.layers:
+ if count >= len(base.layers) - 10:
+ layer.trainable = True
+ count_trainable += 1
+ else:
+ layer.trainable = False
+ count += 1
+
+ inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
+ x = base(inputs, training=False)
+ x = layers.GlobalAveragePooling2D()(x)
+ x = layers.Dense(512, activation='relu')(x)
+ x = layers.Dropout(dropout_rate)(x)
+ x = layers.Dense(256, activation='relu')(x)
+ x = layers.Dropout(dropout_rate)(x)
+ outputs = layers.Dense(NUM_CLASSES, activation='softmax')(x)
+ model = models.Model(inputs=inputs, outputs=outputs)
+ model.compile(optimizer='adam',
+ loss='categorical_crossentropy', # Ensure that you're passing it as a string
+ metrics=['accuracy'])
+ print(count_trainable)
+ return model
+
# Download the OpenML task for the Indoorscenes dataset.
+
+# task = openml.tasks.get_task(362065)# 10 fold cross validation
+task = openml.tasks.get_task(362070)# 3 fold cross validation
+
+model = build_model()
+
+# Run the model on the task (requires an API key).
+run = openml.runs.run_model_on_task(model, task, avoid_duplicate_runs=False)
+
+# If you want to publish the run with the onnx file,
+# then you must call openml_tensorflow.add_onnx_to_run() immediately before run.publish().
+# When you publish, onnx file of last trained model is uploaded.
+# Careful to not call this function when another run_model_on_task is called in between,
+# as during publish later, only the last trained model (from last run_model_on_task call) is uploaded.
+run = openml_tensorflow.add_onnx_to_run(run)
+
+run.publish()
+
+print('URL for run: %s/run/%d?api_key=%s' % (openml.config.server, run.run_id, openml.config.apikey))
+
# Visualize model in netron
+
+from urllib.request import urlretrieve
+
+published_run = openml.runs.get_run(run.run_id)
+url = 'https://api.openml.org/data/download/{}/model.onnx'.format(published_run.output_files['onnx_model'])
+
+file_path, _ = urlretrieve(url, 'model.onnx')
+
+import netron
+# Visualize the ONNX model using Netron
+netron.start(file_path)
+
+# URL for run: https://www.openml.org/api/v1/xml/run/10594206
+
%matplotlib inline
+
An example of a TensorFlow pre-trained network that classifies indoor scene images, where all layers are trained. +For smaller datasets, or datasets similar to the one the base model was trained on, +it is advisable to freeze the pre-trained network and only train the custom layers.
+import openml
+import openml_tensorflow
+import pandas as pd
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+import tensorflow as tf
+from tensorflow.keras.applications import EfficientNetB0
+from tensorflow.keras import optimizers, Model
+from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
+
openml.config.apikey = 'KEY' # Paste your API key here
+
openml_tensorflow.config.epoch = 1 # small epoch for test runs
+
+datagen = ImageDataGenerator()
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.dir = openml.config.get_cache_directory()+'/datasets/45923/Images/'
+openml_tensorflow.config.x_col = "Filename"
+openml_tensorflow.config.y_col = 'Class_encoded'
+openml_tensorflow.config.datagen = datagen
+openml_tensorflow.config.batch_size = 2
+openml_tensorflow.config.class_mode = "categorical"
+openml_tensorflow.config.perform_validation = True
+
+kwargs = {
+ 'callbacks': tf.keras.callbacks.EarlyStopping(monitor='loss', patience=0),
+ 'verbose': 2
+}
+openml_tensorflow.config.kwargs = kwargs
+
+IMG_SIZE = 128
+NUM_CLASSES = 67
+base_learning_rate = 0.0001
+
+# Example pre-trained model
+base_model = EfficientNetB0(input_shape=(IMG_SIZE, IMG_SIZE, 3),
+ weights="imagenet",
+ include_top=False)
+x = base_model.output
+x = GlobalAveragePooling2D()(x)
+predictions = Dense(NUM_CLASSES, activation='softmax')(x)
+model = Model(inputs=base_model.input, outputs=predictions)
+model.compile(optimizers.Adam(learning_rate=4e-4),
+ loss='categorical_crossentropy',
+ metrics=['AUC'])
+
# Download the OpenML task for the Indoor Scenes dataset.
+task = openml.tasks.get_task(362070)# 3 fold cross validation
+
+model = model
+
+# Run the Keras model on the task (requires an API key).
+run = openml.runs.run_model_on_task(model, task, avoid_duplicate_runs=False)
+
Note: If you want to publish the run with the ONNX file, +then you must call openml_tensorflow.add_onnx_to_run() immediately before run.publish(). +When you publish, the ONNX file of the most recently trained model is uploaded. +Be careful not to call this function if another run_model_on_task happens in between, +because at publish time only the model from the most recent run_model_on_task call is uploaded.
+run = openml_tensorflow.add_onnx_to_run(run)
+
+run.publish()
+
+print('URL for run: %s/run/%d?api_key=%s' % (openml.config.server, run.run_id, openml.config.apikey))
+
Optional: Visualize model in netron
+from urllib.request import urlretrieve
+
+published_run = openml.runs.get_run(run.run_id)
+url = 'https://api.openml.org/data/download/{}/model.onnx'.format(published_run.output_files['onnx_model'])
+
+file_path, _ = urlretrieve(url, 'model.onnx')
+
+import netron
+# Visualize the ONNX model using Netron
+netron.start(file_path)
+