From 200f6d292bd8634c6e4eb8c346e832b242dd37ae Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:42:09 +0100 Subject: [PATCH 001/147] tg - trame page et premiers graphs sur page data --- dashboards/app/pages/data.py | 260 ++++++++++++++++++++++++++++++++--- 1 file changed, 238 insertions(+), 22 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index c1bd247..8f76a15 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -2,13 +2,22 @@ import altair as alt import pandas as pd import duckdb +import plotly.express as px + + +# Page setting : wide layout +st.set_page_config( + layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Data" +) st.markdown( """# 🔎 Data -*Quels impacts sur les milieux naturels et quels sont les comportements/ usages / Secteurs economiques à l’origine de cette pollution / macrodechets ?* +Visualisez les impacts sur les milieux naturels et secteurs/filières/marques à l’origine de cette pollution """ ) +# Import des données + df_nb_dechet = pd.read_csv( ( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" @@ -25,25 +34,232 @@ ) ) -res_aggCategory_filGroup = duckdb.query( - ( - "SELECT categorie, sum(nb_dechet) AS total_dechet " - "FROM df_nb_dechet " - "WHERE type_regroupement = 'GROUPE' " - "GROUP BY categorie " - "HAVING sum(nb_dechet) > 10000 " - "ORDER BY total_dechet DESC;" - ) -).to_df() - -# st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") - -st.altair_chart( - alt.Chart(res_aggCategory_filGroup) - .mark_bar() - .encode( - x=alt.X("categorie", sort=None, title=""), - y=alt.Y("total_dechet", title="Total de déchet"), - ), - use_container_width=True, + +# 3 Onglets : Matériaux, Top déchets, Filières et marques +tab1, tab2, tab3 = st.tabs( + [ + "Matériaux :wood:", + "Top Déchets :wastebasket:", + "Secteurs et marques :womans_clothes:", + ] ) + +# Onglet 1 : 
Matériaux +with tab1: + + # Transformation du dataframe pour les graphiques + # Variables à conserver en ligne + cols_identifiers = [ + "ANNEE", + "TYPE_MILIEU", + "INSEE_COM", + "DEP", + "REG", + "EPCI", + "BV2022", + ] + + # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau + cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] + + # Copie des données pour transfo + df_volume = df_other.copy() + + # Calcul des indicateurs clés de haut de tableau avant transformation + volume_total = df_volume["VOLUME_TOTAL"].sum() + poids_total = df_volume["POIDS_TOTAL"].sum() + volume_total_categorise = df_volume[cols_volume].sum().sum() + pct_volume_cateforise = volume_total_categorise / volume_total + nb_collectes = len(df_volume) + + # Dépivotage du tableau pour avoir une base de données exploitable + df_volume = df_volume.melt( + id_vars=cols_identifiers, + value_vars=cols_volume, + var_name="Matériau", + value_name="Volume", + ) + + # Nettoyer le nom du Type déchet pour le rendre plus lisible + df_volume["Matériau"] = ( + df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() + ) + + # Grouper par type de matériau pour les visualisations + df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)["Volume"].sum() + df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) + + # Charte graphique MERTERRE : + colors_map = { + "Plastique": "#48BEF0", + "Caoutchouc": "#364E74", + "Bois": "#673C11", + "Textile": "#C384B1", + "Papier": "#CAA674", + "Metal": "#A0A0A0", + "Verre": "#3DCE89", + "Autre": "#F3B900", + } + + # Ligne 0 : Filtres géographiques + l0_col1, l0_col2 = st.columns(2) + filtre_niveaugeo = l0_col1.selectbox( + "Niveau géo", ["Région", "Département", "EPCI", "Commune", "Bassin de vie"] + ) + filtre_lieu = l0_col2.selectbox("Territoire", ["Ter1", "Ter2"]) + + # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = 
st.columns(3) + + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + cell1.metric("Volume de déchets collectés", f"{volume_total:.0f} litres") + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + cell2.metric("Poids total collecté", f"{poids_total:.0f} kg") + + # 3ème métrique : nombre de relevés + cell3 = l1_col3.container(border=True) + cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + + # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux + l2_col1, l2_col2 = st.columns(2) + with l2_col1: + + # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance + fig = px.pie( + df_totals_sorted, + values="Volume", + names="Matériau", + title="Répartition des matériaux en volume", + hole=0.4, + color="Matériau", # Utilisation de 'index' pour le mappage des couleurs + color_discrete_map=colors_map, + ) # Application du dictionnaire de mappage de couleurs + + # Amélioration de l'affichage + fig.update_traces(textinfo="percent") + fig.update_layout(autosize=True, legend_title_text="Matériau") + + # Affichage du graphique + st.plotly_chart(fig, use_container_width=True) + + st.write( + f"Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_cateforise:.0%} du volume total" + ) + + with l2_col2: + # Création du graphique en barres avec Plotly Express + fig2 = px.bar( + df_totals_sorted, + x="Matériau", + y="Volume", + text="Volume", + title="Volume total par materiau (en litres)", + color="Matériau", + color_discrete_map=colors_map, + ) + + # Amélioration du graphique + fig2.update_traces(texttemplate="%{text:.2s}", textposition="outside") + fig2.update_layout( + autosize=True, + uniformtext_minsize=8, + uniformtext_mode="hide", + 
xaxis_tickangle=90, + showlegend=False, + ) + + # Affichage du graphique + st.plotly_chart(fig2, use_container_width=True) + + st.divider() + + # Ligne 3 : Graphe par milieu de collecte + st.write("**Volume collecté par matériau en fonction du milieu de collecte**") + + # Part de volume collecté par type de milieu + + # Grouper par année et type de matériau + df_typemilieu = df_volume.groupby(["TYPE_MILIEU", "Matériau"], as_index=False)[ + "Volume" + ].sum() + df_typemilieu = df_typemilieu.sort_values( + ["TYPE_MILIEU", "Volume"], ascending=False + ) + + # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau + fig3 = px.histogram( + df_typemilieu, + x="TYPE_MILIEU", + y="Volume", + color="Matériau", + barnorm="percent", + title="Répartition des matériaux en fonction du milieu de collecte", + text_auto=False, + color_discrete_map=colors_map, + ) + + fig3.update_layout(bargap=0.2) + fig3.update_layout(yaxis_title="% du volume collecté", xaxis_title=None) + + # Afficher le graphique + st.plotly_chart(fig3, use_container_width=True) + + st.divider() + + # Ligne 3 : Graphe par milieu de collecte + st.write("**Détail par milieu, lieu ou année**") + l3_col1, l3_col2, l3_col3 = st.columns(3) + filtre_milieu = l3_col1.selectbox("Milieu", ["Test 1", "Test_2"], index=None) + filtre_lieu = l3_col2.selectbox("Lieu", ["Lieu 1", "Lieu 2"], index=None) + filtre_annee = l3_col3.selectbox("Année", [2020, 2021], index=None) + + # Ligne 4 : donut filtré et table de données + l4_col1, l4_col2 = st.columns(2) + with l4_col1: + st.markdown("""**Répartition des matériaux collectés (% volume)**""") + + with l4_col2: + st.markdown("""Table de données""") + + +# Onglet 2 : Top Déchets +with tab2: + st.markdown( + """## Quels sont les types de déchets les plus présents sur votre territoire ? 
+ """ + ) + res_aggCategory_filGroup = duckdb.query( + ( + "SELECT categorie, sum(nb_dechet) AS total_dechet " + "FROM df_nb_dechet " + "WHERE type_regroupement = 'GROUPE' " + "GROUP BY categorie " + "HAVING sum(nb_dechet) > 10000 " + "ORDER BY total_dechet DESC;" + ) + ).to_df() + + # st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") + + st.altair_chart( + alt.Chart(res_aggCategory_filGroup) + .mark_bar() + .encode( + x=alt.X("categorie", sort=None, title=""), + y=alt.Y("total_dechet", title="Total de déchet"), + ), + use_container_width=True, + ) + + +# Onglet 3 : Secteurs et marques +with tab3: + st.markdown( + """## Quels sont les secteurs, filières et marques les plus représentés ? + """ + ) From 5ed8874bdd4058a2eea89af87805f247eff90f24 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Mon, 1 Apr 2024 16:48:16 +0200 Subject: [PATCH 002/147] avancee deshboard --- dashboards/app/pages/data.py | 13 +++++++++++++ dashboards/app/pages/style.css | 12 ++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 dashboards/app/pages/style.css diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 8f76a15..5ff3578 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -3,6 +3,8 @@ import pandas as pd import duckdb import plotly.express as px +import folium +from folium import IFrame # Page setting : wide layout @@ -228,6 +230,17 @@ # Onglet 2 : Top Déchets + +# Préparation des datas pour l'onglet 2 +df_top = df_nb_dechet.copy() +df_top_data_releves = df_other.copy() +# Filtration sur les type-regroupement selection dechets "GROUPE" uniquement +df_top_dechet_milieu = df_top[df_top["type_regroupement"].isin(['GROUPE'])] +#Ajout du type milieu et lieu + + + + with tab2: st.markdown( """## Quels sont les types de déchets les plus présents sur votre territoire ? 
diff --git a/dashboards/app/pages/style.css b/dashboards/app/pages/style.css new file mode 100644 index 0000000..e1ce9cd --- /dev/null +++ b/dashboards/app/pages/style.css @@ -0,0 +1,12 @@ + +/* Card */ +div.css-1r6slb0.e1tzin5v2 { + background-color: #FFFFFF; + border: 1px solid #CCCCCC; + padding: 5% 5% 5% 10%; + border-radius: 5px; + + border-left: 0.5rem solid #1951A0 !important; + box-shadow: 0 0.15rem 1.75rem 0 rgba(58, 59, 69, 0.15) !important; + +} \ No newline at end of file From 188a0046cd782baf4ee66ddaa771dad07b0dc711 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Mon, 1 Apr 2024 16:50:30 +0200 Subject: [PATCH 003/147] avancee dashboard --- dashboards/app/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 28dbd01..77eceb6 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -1,3 +1,5 @@ pandas==2.0.3 duckdb==0.10.0 streamlit==1.32.2 +folium==0.15.1 +plotly==5.19.0 From a7d8d3efde99805d4af50461fc25127b55666741 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Mon, 1 Apr 2024 17:50:04 +0200 Subject: [PATCH 004/147] =?UTF-8?q?avanc=C3=A9e=20dashboard?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 5ff3578..5b11f76 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -46,6 +46,17 @@ ] ) +# Creation des dictionnaires pour filtration des graphiques: +collectivites_dict = { + "REGION": df_other["REGION"].unique().tolist(), + "DEPARTEMENT": df_other["DEPARTEMENT"].unique().tolist(), + "EPCI": df_other["EPCI"].unique().tolist(), + "Commune": df_other["INSEE_COM"].unique().tolist(), # Assuming 'Commune' refers to the 'INSEE_COM' column + "Bassin de vie": 
df_other["BASSIN_DE_VIE"].unique().tolist() +} + +milieu_lieu_dict = {} + # Onglet 1 : Matériaux with tab1: @@ -236,8 +247,10 @@ df_top_data_releves = df_other.copy() # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement df_top_dechet_milieu = df_top[df_top["type_regroupement"].isin(['GROUPE'])] -#Ajout du type milieu et lieu - +# Group by 'categorie', sum 'nb_dechet', et top 10 +df_top10_dechets = df_dechets_groupe.groupby("categorie").agg({"nb_dechet": "sum"}).sort_values(by="nb_dechet", ascending=False).head(10) +# recuperation de ces 10 dechets dans une liste pour filtration bubble map +noms_top10_dechets = df_top10_dechets.index.tolist() From 97df2ad897ff2cbf3449ca05db69c9ce4eaa3cd3 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Mon, 1 Apr 2024 19:16:13 +0200 Subject: [PATCH 005/147] =?UTF-8?q?avanc=C3=A9e=20dashboard=20onglet=20dat?= =?UTF-8?q?a=20streamlit?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 123 ++++++++++++++++++++++++++++------- 1 file changed, 99 insertions(+), 24 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 5b11f76..f0b6ce8 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -51,11 +51,15 @@ "REGION": df_other["REGION"].unique().tolist(), "DEPARTEMENT": df_other["DEPARTEMENT"].unique().tolist(), "EPCI": df_other["EPCI"].unique().tolist(), - "Commune": df_other["INSEE_COM"].unique().tolist(), # Assuming 'Commune' refers to the 'INSEE_COM' column - "Bassin de vie": df_other["BASSIN_DE_VIE"].unique().tolist() + "Commune": df_other["commune"].unique().tolist(), + "Bassin de vie": df_other["BASSIN_DE_VIE"].unique().tolist(), + "LIB EPCI": df_other["LIBEPCI"].unique().tolist(), + "NATURE EPCI": df_other["NATURE_EPCI"].unique().tolist() } -milieu_lieu_dict = {} +milieu_lieu_dict = df_other.groupby('TYPE_MILIEU')['TYPE_LIEU'].unique().apply(lambda x: x.tolist()).to_dict() + 
+annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) # Onglet 1 : Matériaux with tab1: @@ -117,7 +121,7 @@ # Ligne 0 : Filtres géographiques l0_col1, l0_col2 = st.columns(2) filtre_niveaugeo = l0_col1.selectbox( - "Niveau géo", ["Région", "Département", "EPCI", "Commune", "Bassin de vie"] + "Niveau géo", ["Région", "Département", "EPCI", "Commune", "Bassin de vie", "LIB EPCI", "NATURE EPCI"] ) filtre_lieu = l0_col2.selectbox("Territoire", ["Ter1", "Ter2"]) @@ -246,11 +250,26 @@ df_top = df_nb_dechet.copy() df_top_data_releves = df_other.copy() # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement -df_top_dechet_milieu = df_top[df_top["type_regroupement"].isin(['GROUPE'])] +df_dechets_groupe = df_top[df_top["type_regroupement"].isin(['GROUPE'])] # Group by 'categorie', sum 'nb_dechet', et top 10 df_top10_dechets = df_dechets_groupe.groupby("categorie").agg({"nb_dechet": "sum"}).sort_values(by="nb_dechet", ascending=False).head(10) # recuperation de ces 10 dechets dans une liste pour filtration bubble map noms_top10_dechets = df_top10_dechets.index.tolist() +# Preparation de la figure barplot +df_top10_dechets.reset_index(inplace=True) +# Création du graphique en barres avec Plotly Express +fig = px.bar(df_top10_dechets, + x='categorie', + y='nb_dechet', + labels={'categorie': 'Dechet', 'nb_dechet': 'Nombre total'}, + title='Top 10 dechets ramassés') + +# Amélioration du graphique pour le rendre plus agréable à regarder +fig.update_traces(texttemplate='%{text:.2s}', textposition='outside') +fig.update_layout( + width=1400, + height=900, + uniformtext_minsize=8, uniformtext_mode='hide', xaxis_tickangle=90) @@ -259,28 +278,84 @@ """## Quels sont les types de déchets les plus présents sur votre territoire ? 
""" ) - res_aggCategory_filGroup = duckdb.query( - ( - "SELECT categorie, sum(nb_dechet) AS total_dechet " - "FROM df_nb_dechet " - "WHERE type_regroupement = 'GROUPE' " - "GROUP BY categorie " - "HAVING sum(nb_dechet) > 10000 " - "ORDER BY total_dechet DESC;" - ) - ).to_df() +# res_aggCategory_filGroup = duckdb.query( +# ( +# "SELECT categorie, sum(nb_dechet) AS total_dechet " +# "FROM df_nb_dechet " +# "WHERE type_regroupement = 'GROUPE' " +# "GROUP BY categorie " +# "HAVING sum(nb_dechet) > 10000 " +# "ORDER BY total_dechet DESC;" +# ) +# ).to_df() # st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") - st.altair_chart( - alt.Chart(res_aggCategory_filGroup) - .mark_bar() - .encode( - x=alt.X("categorie", sort=None, title=""), - y=alt.Y("total_dechet", title="Total de déchet"), - ), - use_container_width=True, - ) +# st.altair_chart( +# alt.Chart(res_aggCategory_filGroup) +# .mark_bar() +# .encode( +# x=alt.X("categorie", sort=None, title=""), +# y=alt.Y("total_dechet", title="Total de déchet"), +# ), +# use_container_width=True, +# ) + +with st.container(): + col1, col2 = st.columns([3, 1]) + + with col1: + st.plotly_chart(fig) + + with col2: + st.write("Métriques des déchets") # Titre pour les cartes + for index, row in df_top10_dechets.iterrows(): + st.metric(label=row['categorie'], value=row['nb_dechet']) + +with st.container(): + # Ajout de la selectbox + selected_dechet = st.selectbox("Choisir un type de déchet :", noms_top10_dechets, index=0) + + # Filtration sur le dechet top 10 sélectionné + df_top_map = df_top[df_top["categorie"] == selected_dechet] + + # Création du DataFrame de travail pour la carte + df_map_data = pd.merge(df_top_map, df_top_data_releves, on='ID_RELEVE', how='inner') + + # Création de la carte centrée autour d'une localisation + # Calcul des limites à partir de vos données + min_lat = df_map_data["LIEU_COORD_GPS_Y"].min() + max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() + min_lon = 
df_map_data["LIEU_COORD_GPS_X"].min() + max_lon = df_map_data["LIEU_COORD_GPS_X"].max() + + map_paca = folium.Map(location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], zoom_start=8, tiles='OpenStreetMap') + + # Facteur de normalisation pour ajuster la taille des bulles + normalisation_facteur = 1000 + + for index, row in df_map_data.iterrows(): + # Application de la normalisation + radius = row['nb_dechet'] / normalisation_facteur + + # Application d'une limite minimale pour le rayon si nécessaire + radius = max(radius, 1) + + folium.CircleMarker( + location=(row['LIEU_COORD_GPS_Y'], row['LIEU_COORD_GPS_X']), + radius=radius, # Utilisation du rayon ajusté + popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", + color='#3186cc', + fill=True, + fill_color='#3186cc' + ).add_to(map_paca) + + # Affichage de la carte Folium dans Streamlit + st_folium = st.components.v1.html + st_folium(folium.Figure().add_child(map_paca).render() + #, width=1400 + , height=1000 + ) # Onglet 3 : Secteurs et marques From 0b212c4c83079a3d371b4e63077af07b9964bf1a Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:44:04 +0200 Subject: [PATCH 006/147] tg - add filters --- dashboards/app/pages/data.py | 293 ++++++------ poetry.lock | 851 ++++++++++++++++++++++++++++++++--- pyproject.toml | 4 + 3 files changed, 969 insertions(+), 179 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index f0b6ce8..8f057dd 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -5,7 +5,7 @@ import plotly.express as px import folium from folium import IFrame - +from streamlit_dynamic_filters import DynamicFilters # Page setting : wide layout st.set_page_config( @@ -36,6 +36,15 @@ ) ) +# Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) 
+df_other["DEP_CODE_NOM"] = df_other["DEP"] + " - " + df_other["DEPARTEMENT"] +df_other["COMMUNE_CODE_NOM"] = df_other["INSEE_COM"] + " - " + df_other["commune"] + + +# Création du filtre dynamique par niveau géographique +niveaux_geo = ["REGION", "DEP_CODE_NOM", "LIBEPCI", "BASSIN_DE_VIE", "COMMUNE_CODE_NOM"] +dynamic_filters = DynamicFilters(df_other, filters=niveaux_geo) +df_other_filtre = dynamic_filters.filter_df() # 3 Onglets : Matériaux, Top déchets, Filières et marques tab1, tab2, tab3 = st.tabs( @@ -48,16 +57,21 @@ # Creation des dictionnaires pour filtration des graphiques: collectivites_dict = { - "REGION": df_other["REGION"].unique().tolist(), - "DEPARTEMENT": df_other["DEPARTEMENT"].unique().tolist(), + "Région": df_other["REGION"].unique().tolist(), + "Département": df_other["DEPARTEMENT"].unique().tolist(), "EPCI": df_other["EPCI"].unique().tolist(), "Commune": df_other["commune"].unique().tolist(), "Bassin de vie": df_other["BASSIN_DE_VIE"].unique().tolist(), "LIB EPCI": df_other["LIBEPCI"].unique().tolist(), - "NATURE EPCI": df_other["NATURE_EPCI"].unique().tolist() + "NATURE EPCI": df_other["NATURE_EPCI"].unique().tolist(), } -milieu_lieu_dict = df_other.groupby('TYPE_MILIEU')['TYPE_LIEU'].unique().apply(lambda x: x.tolist()).to_dict() +milieu_lieu_dict = ( + df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] + .unique() + .apply(lambda x: x.tolist()) + .to_dict() +) annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) @@ -80,7 +94,7 @@ cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] # Copie des données pour transfo - df_volume = df_other.copy() + df_volume = df_other_filtre.copy() # Calcul des indicateurs clés de haut de tableau avant transformation volume_total = df_volume["VOLUME_TOTAL"].sum() @@ -119,11 +133,14 @@ } # Ligne 0 : Filtres géographiques - l0_col1, l0_col2 = st.columns(2) - filtre_niveaugeo = l0_col1.selectbox( - "Niveau géo", ["Région", "Département", "EPCI", "Commune", "Bassin de vie", "LIB 
EPCI", "NATURE EPCI"] - ) - filtre_lieu = l0_col2.selectbox("Territoire", ["Ter1", "Ter2"]) + # Popover cell + # with st.popover("Filtres géographiques", help = "Sélectionnez le niveau géographique souhaité pour afficher les indicateurs") : + + dynamic_filters.display_filters(location="sidebar") + # filtre_region = st.selectbox( + # "Région :", collectivites_dict["Région"], + # index=None + # ) # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -132,14 +149,18 @@ # 1ère métrique : volume total de déchets collectés cell1 = l1_col1.container(border=True) - cell1.metric("Volume de déchets collectés", f"{volume_total:.0f} litres") + # Trick pour séparer les milliers + volume_total = f"{volume_total:,.0f}".replace(",", " ") + cell1.metric("Volume de déchets collectés", f"{volume_total} litres") # 2ème métrique : poids cell2 = l1_col2.container(border=True) - cell2.metric("Poids total collecté", f"{poids_total:.0f} kg") + poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric("Poids total collecté", f"{poids_total} kg") # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) + nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux @@ -164,10 +185,6 @@ # Affichage du graphique st.plotly_chart(fig, use_container_width=True) - st.write( - f"Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_cateforise:.0%} du volume total" - ) - with l2_col2: # Création du graphique en barres avec Plotly Express fig2 = px.bar( @@ -193,6 +210,11 @@ # Affichage du graphique st.plotly_chart(fig2, use_container_width=True) + st.write("") + st.caption( + f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit 
{pct_volume_cateforise:.0%} du volume total collecté." + ) + st.divider() # Ligne 3 : Graphe par milieu de collecte @@ -245,122 +267,141 @@ # Onglet 2 : Top Déchets - -# Préparation des datas pour l'onglet 2 -df_top = df_nb_dechet.copy() -df_top_data_releves = df_other.copy() -# Filtration sur les type-regroupement selection dechets "GROUPE" uniquement -df_dechets_groupe = df_top[df_top["type_regroupement"].isin(['GROUPE'])] -# Group by 'categorie', sum 'nb_dechet', et top 10 -df_top10_dechets = df_dechets_groupe.groupby("categorie").agg({"nb_dechet": "sum"}).sort_values(by="nb_dechet", ascending=False).head(10) -# recuperation de ces 10 dechets dans une liste pour filtration bubble map -noms_top10_dechets = df_top10_dechets.index.tolist() -# Preparation de la figure barplot -df_top10_dechets.reset_index(inplace=True) -# Création du graphique en barres avec Plotly Express -fig = px.bar(df_top10_dechets, - x='categorie', - y='nb_dechet', - labels={'categorie': 'Dechet', 'nb_dechet': 'Nombre total'}, - title='Top 10 dechets ramassés') - -# Amélioration du graphique pour le rendre plus agréable à regarder -fig.update_traces(texttemplate='%{text:.2s}', textposition='outside') -fig.update_layout( - width=1400, - height=900, - uniformtext_minsize=8, uniformtext_mode='hide', xaxis_tickangle=90) - - - with tab2: - st.markdown( - """## Quels sont les types de déchets les plus présents sur votre territoire ? 
- """ + # Préparation des datas pour l'onglet 2 + df_top = df_nb_dechet.copy() + df_top_data_releves = df_other.copy() + # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement + df_dechets_groupe = df_top[df_top["type_regroupement"].isin(["GROUPE"])] + # Group by 'categorie', sum 'nb_dechet', et top 10 + df_top10_dechets = ( + df_dechets_groupe.groupby("categorie") + .agg({"nb_dechet": "sum"}) + .sort_values(by="nb_dechet", ascending=False) + .head(10) + ) + # recuperation de ces 10 dechets dans une liste pour filtration bubble map + noms_top10_dechets = df_top10_dechets.index.tolist() + # Preparation de la figure barplot + df_top10_dechets.reset_index(inplace=True) + # Création du graphique en barres avec Plotly Express + fig = px.bar( + df_top10_dechets, + x="categorie", + y="nb_dechet", + labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, + title="Top 10 dechets ramassés", ) -# res_aggCategory_filGroup = duckdb.query( -# ( -# "SELECT categorie, sum(nb_dechet) AS total_dechet " -# "FROM df_nb_dechet " -# "WHERE type_regroupement = 'GROUPE' " -# "GROUP BY categorie " -# "HAVING sum(nb_dechet) > 10000 " -# "ORDER BY total_dechet DESC;" -# ) -# ).to_df() + + # Amélioration du graphique pour le rendre plus agréable à regarder + fig.update_traces(texttemplate="%{text:.2s}", textposition="outside") + fig.update_layout( + width=1400, + height=900, + uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=90, + ) + + # st.markdown( + # """## Quels sont les types de déchets les plus présents sur votre territoire ? 
+ # """ + # ) + # res_aggCategory_filGroup = duckdb.query( + # ( + # "SELECT categorie, sum(nb_dechet) AS total_dechet " + # "FROM df_nb_dechet " + # "WHERE type_regroupement = 'GROUPE' " + # "GROUP BY categorie " + # "HAVING sum(nb_dechet) > 10000 " + # "ORDER BY total_dechet DESC;" + # ) + # ).to_df() # st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") -# st.altair_chart( -# alt.Chart(res_aggCategory_filGroup) -# .mark_bar() -# .encode( -# x=alt.X("categorie", sort=None, title=""), -# y=alt.Y("total_dechet", title="Total de déchet"), -# ), -# use_container_width=True, -# ) + # st.altair_chart( + # alt.Chart(res_aggCategory_filGroup) + # .mark_bar() + # .encode( + # x=alt.X("categorie", sort=None, title=""), + # y=alt.Y("total_dechet", title="Total de déchet"), + # ), + # use_container_width=True, + # ) + + with st.container(): + col1, col2 = st.columns([3, 1]) + + with col1: + st.plotly_chart(fig, use_container_width=True) + + with col2: + st.write("Métriques des déchets") # Titre pour les cartes + for index, row in df_top10_dechets.iterrows(): + value = f"{row['nb_dechet']:,.0f}".replace(",", " ") + st.metric(label=row["categorie"], value=value) + + with st.container(): + # Ajout de la selectbox + selected_dechet = st.selectbox( + "Choisir un type de déchet :", noms_top10_dechets, index=0 + ) -with st.container(): - col1, col2 = st.columns([3, 1]) - - with col1: - st.plotly_chart(fig) - - with col2: - st.write("Métriques des déchets") # Titre pour les cartes - for index, row in df_top10_dechets.iterrows(): - st.metric(label=row['categorie'], value=row['nb_dechet']) - -with st.container(): - # Ajout de la selectbox - selected_dechet = st.selectbox("Choisir un type de déchet :", noms_top10_dechets, index=0) - - # Filtration sur le dechet top 10 sélectionné - df_top_map = df_top[df_top["categorie"] == selected_dechet] - - # Création du DataFrame de travail pour la carte - df_map_data = pd.merge(df_top_map, df_top_data_releves, 
on='ID_RELEVE', how='inner') - - # Création de la carte centrée autour d'une localisation - # Calcul des limites à partir de vos données - min_lat = df_map_data["LIEU_COORD_GPS_Y"].min() - max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() - min_lon = df_map_data["LIEU_COORD_GPS_X"].min() - max_lon = df_map_data["LIEU_COORD_GPS_X"].max() - - map_paca = folium.Map(location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], zoom_start=8, tiles='OpenStreetMap') - - # Facteur de normalisation pour ajuster la taille des bulles - normalisation_facteur = 1000 - - for index, row in df_map_data.iterrows(): - # Application de la normalisation - radius = row['nb_dechet'] / normalisation_facteur - - # Application d'une limite minimale pour le rayon si nécessaire - radius = max(radius, 1) - - folium.CircleMarker( - location=(row['LIEU_COORD_GPS_Y'], row['LIEU_COORD_GPS_X']), - radius=radius, # Utilisation du rayon ajusté - popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", - color='#3186cc', - fill=True, - fill_color='#3186cc' - ).add_to(map_paca) - - # Affichage de la carte Folium dans Streamlit - st_folium = st.components.v1.html - st_folium(folium.Figure().add_child(map_paca).render() - #, width=1400 - , height=1000 - ) + # Filtration sur le dechet top 10 sélectionné + df_top_map = df_top[df_top["categorie"] == selected_dechet] + + # Création du DataFrame de travail pour la carte + df_map_data = pd.merge( + df_top_map, df_top_data_releves, on="ID_RELEVE", how="inner" + ) + + # Création de la carte centrée autour d'une localisation + # Calcul des limites à partir de vos données + min_lat = df_map_data["LIEU_COORD_GPS_Y"].min() + max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() + min_lon = df_map_data["LIEU_COORD_GPS_X"].min() + max_lon = df_map_data["LIEU_COORD_GPS_X"].max() + + map_paca = folium.Map( + location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], + zoom_start=8, + tiles="OpenStreetMap", + ) + + # Facteur de 
normalisation pour ajuster la taille des bulles + normalisation_facteur = 1000 + + for index, row in df_map_data.iterrows(): + # Application de la normalisation + radius = row["nb_dechet"] / normalisation_facteur + + # Application d'une limite minimale pour le rayon si nécessaire + radius = max(radius, 1) + + folium.CircleMarker( + location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), + radius=radius, # Utilisation du rayon ajusté + popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", + color="#3186cc", + fill=True, + fill_color="#3186cc", + ).add_to(map_paca) + + # Affichage de la carte Folium dans Streamlit + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_paca).render() + # , width=1400 + , + height=1000, + ) # Onglet 3 : Secteurs et marques with tab3: - st.markdown( - """## Quels sont les secteurs, filières et marques les plus représentés ? - """ - ) + st.write("") +# st.markdown( +# """## Quels sont les secteurs, filières et marques les plus représentés ? +# """ +# ) diff --git a/poetry.lock b/poetry.lock index 9bcff55..7e1375a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,10 +1,53 @@ -# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. + +[[package]] +name = "altair" +version = "5.3.0" +description = "Vega-Altair: A declarative statistical visualization library for Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "altair-5.3.0-py3-none-any.whl", hash = "sha256:7084a1dab4d83c5e7e5246b92dc1b4451a6c68fd057f3716ee9d315c8980e59a"}, + {file = "altair-5.3.0.tar.gz", hash = "sha256:5a268b1a0983b23d8f9129f819f956174aa7aea2719ed55a52eba9979b9f6675"}, +] + +[package.dependencies] +jinja2 = "*" +jsonschema = ">=3.0" +numpy = "*" +packaging = "*" +pandas = ">=0.25" +toolz = "*" +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +all = ["altair-tiles (>=0.3.0)", "anywidget (>=0.9.0)", "pyarrow (>=11)", "vega-datasets (>=0.9.0)", "vegafusion[embed] (>=1.6.6)", "vl-convert-python (>=1.3.0)"] +dev = ["geopandas", "hatch", "ipython", "m2r", "mypy", "pandas-stubs", "pytest", "pytest-cov", "ruff (>=0.3.0)", "types-jsonschema", "types-setuptools"] +doc = ["docutils", "jinja2", "myst-parser", "numpydoc", "pillow (>=9,<10)", "pydata-sphinx-theme (>=0.14.1)", "scipy", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinxext-altair"] + +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] [[package]] name = "blinker" version = "1.7.0" description = "Fast, simple object-to-object and broadcast 
signaling" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -12,11 +55,24 @@ files = [ {file = "blinker-1.7.0.tar.gz", hash = "sha256:e6820ff6fa4e4d1d8e2747c2283749c3f547e4fee112b98555cdcdae32996182"}, ] +[[package]] +name = "branca" +version = "0.7.1" +description = "Generate complex HTML+JS pages with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "branca-0.7.1-py3-none-any.whl", hash = "sha256:70515944ed2d1ed2784c552508df58037ca19402a8a1069d57f9113e3e012f51"}, + {file = "branca-0.7.1.tar.gz", hash = "sha256:e6b6f37a37bc0abffd960c68c045a7fe025d628eff87fedf6ab6ca814812110c"}, +] + +[package.dependencies] +jinja2 = ">=3" + [[package]] name = "cachetools" version = "5.3.2" description = "Extensible memoizing collections and decorators" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -28,7 +84,6 @@ files = [ name = "certifi" version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -40,7 +95,6 @@ files = [ name = "cfgv" version = "3.4.0" description = "Validate configuration and produce human readable error messages." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -52,7 +106,6 @@ files = [ name = "chardet" version = "5.2.0" description = "Universal encoding detector for Python 3" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -64,7 +117,6 @@ files = [ name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -164,7 +216,6 @@ files = [ name = "click" version = "8.1.7" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -179,7 +230,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -191,7 +241,6 @@ files = [ name = "dash" version = "2.16.1" description = "A Python framework for building reactive web-apps. Developed by Plotly." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -225,7 +274,6 @@ testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", "dash-testing-stub name = "dash-core-components" version = "2.0.0" description = "Core component suite for Dash" -category = "main" optional = false python-versions = "*" files = [ @@ -237,7 +285,6 @@ files = [ name = "dash-html-components" version = "2.0.0" description = "Vanilla HTML components for Dash" -category = "main" optional = false python-versions = "*" files = [ @@ -249,7 +296,6 @@ files = [ name = "dash-table" version = "5.0.0" description = "Dash table" -category = "main" optional = false python-versions = "*" files = [ @@ -261,7 +307,6 @@ files = [ name = "distlib" version = "0.3.8" description = "Distribution utilities" -category = "dev" optional = false python-versions = "*" files = [ @@ -273,7 +318,6 @@ files = [ name = "duckdb" version = "0.10.1" description = "DuckDB in-process database" -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -330,7 +374,6 @@ files = [ name = "exceptiongroup" version = "1.2.0" description = "Backport of PEP 654 (exception groups)" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -345,7 +388,6 @@ test = 
["pytest (>=6)"] name = "filelock" version = "3.13.1" description = "A platform independent file lock." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -362,7 +404,6 @@ typing = ["typing-extensions (>=4.8)"] name = "flask" version = "3.0.2" description = "A simple framework for building complex web applications." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -381,11 +422,63 @@ Werkzeug = ">=3.0.0" async = ["asgiref (>=3.2)"] dotenv = ["python-dotenv"] +[[package]] +name = "folium" +version = "0.16.0" +description = "Make beautiful maps with Leaflet.js & Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "folium-0.16.0-py2.py3-none-any.whl", hash = "sha256:ba72505db18bef995c880da19457d2b10c931db8059af5f6ccec9310d262b584"}, + {file = "folium-0.16.0.tar.gz", hash = "sha256:2585ee9253dc758d3a365534caa6fb5fa0c244646db4dc5819afc67bbd4daabb"}, +] + +[package.dependencies] +branca = ">=0.6.0" +jinja2 = ">=2.9" +numpy = "*" +requests = "*" +xyzservices = "*" + +[package.extras] +testing = ["pytest"] + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.43" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, + {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, +] + +[package.dependencies] 
+gitdb = ">=4.0.1,<5" + +[package.extras] +doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] +test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] + [[package]] name = "identify" version = "2.5.33" description = "File identification library for Python" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -400,7 +493,6 @@ license = ["ukkonen"] name = "idna" version = "3.6" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -412,7 +504,6 @@ files = [ name = "importlib-metadata" version = "7.1.0" description = "Read metadata from Python packages" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -432,7 +523,6 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -444,7 +534,6 @@ files = [ name = "itsdangerous" version = "2.1.2" description = "Safely pass data to untrusted environments and back." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -456,7 +545,6 @@ files = [ name = "jinja2" version = "3.1.3" description = "A very fast and expressive template engine." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -470,11 +558,69 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "jsonschema" +version = "4.21.1" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"}, + {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -540,11 +686,21 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "nest-asyncio" version = "1.6.0" description = "Patch asyncio to allow nested event loops" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -556,7 +712,6 @@ files = [ name = "nodeenv" version = "1.8.0" description = "Node.js virtual environment builder" -category = "dev" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" files = [ @@ -571,7 +726,6 @@ setuptools = "*" name = "numpy" version = "1.26.4" description = "Fundamental package for array computing in Python" -category = "main" optional = false python-versions = ">=3.9" files = [ @@ -617,7 +771,6 @@ files = [ name = "packaging" version = "23.2" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -629,7 +782,6 @@ files = [ name = "pandas" version = "2.2.1" description = "Powerful data structures for data analysis, time series, and statistics" -category = "main" optional = false python-versions = ">=3.9" files = [ @@ -699,11 +851,114 @@ sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-d test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.9.2)"] +[[package]] +name = "patsy" +version = "0.5.6" +description = "A Python package for describing statistical models and for 
building design matrices." +optional = false +python-versions = "*" +files = [ + {file = "patsy-0.5.6-py2.py3-none-any.whl", hash = "sha256:19056886fd8fa71863fa32f0eb090267f21fb74be00f19f5c70b2e9d76c883c6"}, + {file = "patsy-0.5.6.tar.gz", hash = "sha256:95c6d47a7222535f84bff7f63d7303f2e297747a598db89cf5c67f0c0c7d2cdb"}, +] + +[package.dependencies] +numpy = ">=1.4" +six = "*" + +[package.extras] +test = ["pytest", "pytest-cov", "scipy"] + +[[package]] +name = "pillow" +version = "10.3.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, + {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, + {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, + {file = 
"pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, + {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, + {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, + {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, + {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = 
"sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, + {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, + {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, + {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, + {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, + {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = 
"sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, + {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, + {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, + {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = 
"sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, + {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + [[package]] name = "platformdirs" version = "4.1.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
-category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -719,7 +974,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co name = "plotly" version = "5.20.0" description = "An open-source, interactive data visualization library for Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -731,11 +985,29 @@ files = [ packaging = "*" tenacity = ">=6.2.0" +[[package]] +name = "plotly-express" +version = "0.4.1" +description = "Plotly Express - a high level wrapper for Plotly.py" +optional = false +python-versions = "*" +files = [ + {file = "plotly_express-0.4.1-py2.py3-none-any.whl", hash = "sha256:5f112922b0a6225dc7c010e3b86295a74449e3eac6cac8faa95175e99b7698ce"}, + {file = "plotly_express-0.4.1.tar.gz", hash = "sha256:ff73a41ce02fb43d1d8e8fa131ef3e6589857349ca216b941b8f3f862bce0278"}, +] + +[package.dependencies] +numpy = ">=1.11" +pandas = ">=0.20.0" +patsy = ">=0.5" +plotly = ">=4.1.0" +scipy = ">=0.18" +statsmodels = ">=0.9.0" + [[package]] name = "pluggy" version = "1.4.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -751,7 +1023,6 @@ testing = ["pytest", "pytest-benchmark"] name = "pre-commit" version = "2.21.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -766,11 +1037,112 @@ nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" +[[package]] +name = "protobuf" +version = "4.25.3" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, + {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, + {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, + {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, + {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, + {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, + {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, + {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, + {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, +] + +[[package]] +name = "pyarrow" +version = "15.0.2" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"pyarrow-15.0.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:88b340f0a1d05b5ccc3d2d986279045655b1fe8e41aba6ca44ea28da0d1455d8"}, + {file = "pyarrow-15.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eaa8f96cecf32da508e6c7f69bb8401f03745c050c1dd42ec2596f2e98deecac"}, + {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23c6753ed4f6adb8461e7c383e418391b8d8453c5d67e17f416c3a5d5709afbd"}, + {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f639c059035011db8c0497e541a8a45d98a58dbe34dc8fadd0ef128f2cee46e5"}, + {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:290e36a59a0993e9a5224ed2fb3e53375770f07379a0ea03ee2fce2e6d30b423"}, + {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:06c2bb2a98bc792f040bef31ad3e9be6a63d0cb39189227c08a7d955db96816e"}, + {file = "pyarrow-15.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:f7a197f3670606a960ddc12adbe8075cea5f707ad7bf0dffa09637fdbb89f76c"}, + {file = "pyarrow-15.0.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5f8bc839ea36b1f99984c78e06e7a06054693dc2af8920f6fb416b5bca9944e4"}, + {file = "pyarrow-15.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f5e81dfb4e519baa6b4c80410421528c214427e77ca0ea9461eb4097c328fa33"}, + {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a4f240852b302a7af4646c8bfe9950c4691a419847001178662a98915fd7ee7"}, + {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e7d9cfb5a1e648e172428c7a42b744610956f3b70f524aa3a6c02a448ba853e"}, + {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2d4f905209de70c0eb5b2de6763104d5a9a37430f137678edfb9a675bac9cd98"}, + {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90adb99e8ce5f36fbecbbc422e7dcbcbed07d985eed6062e459e23f9e71fd197"}, + 
{file = "pyarrow-15.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:b116e7fd7889294cbd24eb90cd9bdd3850be3738d61297855a71ac3b8124ee38"}, + {file = "pyarrow-15.0.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:25335e6f1f07fdaa026a61c758ee7d19ce824a866b27bba744348fa73bb5a440"}, + {file = "pyarrow-15.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:90f19e976d9c3d8e73c80be84ddbe2f830b6304e4c576349d9360e335cd627fc"}, + {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a22366249bf5fd40ddacc4f03cd3160f2d7c247692945afb1899bab8a140ddfb"}, + {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2a335198f886b07e4b5ea16d08ee06557e07db54a8400cc0d03c7f6a22f785f"}, + {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e6d459c0c22f0b9c810a3917a1de3ee704b021a5fb8b3bacf968eece6df098f"}, + {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:033b7cad32198754d93465dcfb71d0ba7cb7cd5c9afd7052cab7214676eec38b"}, + {file = "pyarrow-15.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:29850d050379d6e8b5a693098f4de7fd6a2bea4365bfd073d7c57c57b95041ee"}, + {file = "pyarrow-15.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:7167107d7fb6dcadb375b4b691b7e316f4368f39f6f45405a05535d7ad5e5058"}, + {file = "pyarrow-15.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e85241b44cc3d365ef950432a1b3bd44ac54626f37b2e3a0cc89c20e45dfd8bf"}, + {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:248723e4ed3255fcd73edcecc209744d58a9ca852e4cf3d2577811b6d4b59818"}, + {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ff3bdfe6f1b81ca5b73b70a8d482d37a766433823e0c21e22d1d7dde76ca33f"}, + {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:f3d77463dee7e9f284ef42d341689b459a63ff2e75cee2b9302058d0d98fe142"}, + {file = 
"pyarrow-15.0.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:8c1faf2482fb89766e79745670cbca04e7018497d85be9242d5350cba21357e1"}, + {file = "pyarrow-15.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:28f3016958a8e45a1069303a4a4f6a7d4910643fc08adb1e2e4a7ff056272ad3"}, + {file = "pyarrow-15.0.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:89722cb64286ab3d4daf168386f6968c126057b8c7ec3ef96302e81d8cdb8ae4"}, + {file = "pyarrow-15.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cd0ba387705044b3ac77b1b317165c0498299b08261d8122c96051024f953cd5"}, + {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad2459bf1f22b6a5cdcc27ebfd99307d5526b62d217b984b9f5c974651398832"}, + {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58922e4bfece8b02abf7159f1f53a8f4d9f8e08f2d988109126c17c3bb261f22"}, + {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:adccc81d3dc0478ea0b498807b39a8d41628fa9210729b2f718b78cb997c7c91"}, + {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8bd2baa5fe531571847983f36a30ddbf65261ef23e496862ece83bdceb70420d"}, + {file = "pyarrow-15.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6669799a1d4ca9da9c7e06ef48368320f5856f36f9a4dd31a11839dda3f6cc8c"}, + {file = "pyarrow-15.0.2.tar.gz", hash = "sha256:9c9bc803cb3b7bfacc1e96ffbfd923601065d9d3f911179d81e72d99fd74a3d9"}, +] + +[package.dependencies] +numpy = ">=1.16.6,<2" + +[[package]] +name = "pydeck" +version = "0.8.0" +description = "Widget for deck.gl maps" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydeck-0.8.0-py2.py3-none-any.whl", hash = "sha256:a8fa7757c6f24bba033af39db3147cb020eef44012ba7e60d954de187f9ed4d5"}, + {file = "pydeck-0.8.0.tar.gz", hash = "sha256:07edde833f7cfcef6749124351195aa7dcd24663d4909fd7898dbd0b6fbc01ec"}, +] + +[package.dependencies] +jinja2 = ">=2.10.1" +numpy = ">=1.16.4" + +[package.extras] +carto = 
["pydeck-carto"] +jupyter = ["ipykernel (>=5.1.2)", "ipython (>=5.8.0)", "ipywidgets (>=7,<8)", "traitlets (>=4.3.2)"] + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + [[package]] name = "pyproject-api" version = "1.6.1" description = "API to interact with the python pyproject.toml based projects" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -790,7 +1162,6 @@ testing = ["covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytes name = "pytest" version = "7.4.4" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -813,7 +1184,6 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -828,7 +1198,6 @@ six = ">=1.5" name = "pytz" version = "2024.1" description = "World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" files = [ @@ -840,7 +1209,6 @@ files = [ name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -897,11 +1265,25 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = 
"referencing" +version = "0.34.0" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.34.0-py3-none-any.whl", hash = "sha256:d53ae300ceddd3169f1ffa9caf2cb7b769e92657e4fafb23d34b93679116dfd4"}, + {file = "referencing-0.34.0.tar.gz", hash = "sha256:5773bd84ef41799a5a8ca72dc34590c041eb01bf9aa02632b4a973fb0181a844"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + [[package]] name = "requests" version = "2.31.0" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -923,7 +1305,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "retrying" version = "1.3.4" description = "Retrying" -category = "main" optional = false python-versions = "*" files = [ @@ -934,11 +1315,178 @@ files = [ [package.dependencies] six = ">=1.7.0" +[[package]] +name = "rich" +version = "13.7.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rpds-py" +version = "0.18.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e"}, + {file = "rpds_py-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7"}, + {file = 
"rpds_py-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88"}, + {file = "rpds_py-0.18.0-cp310-none-win32.whl", hash = "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337"}, + {file = "rpds_py-0.18.0-cp310-none-win_amd64.whl", hash = "sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836"}, + {file = "rpds_py-0.18.0-cp311-none-win32.whl", hash = "sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1"}, + {file = "rpds_py-0.18.0-cp311-none-win_amd64.whl", hash = "sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0"}, + {file = 
"rpds_py-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7"}, + {file = "rpds_py-0.18.0-cp312-none-win32.whl", hash = "sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98"}, + {file = "rpds_py-0.18.0-cp312-none-win_amd64.whl", hash = "sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594"}, + {file = "rpds_py-0.18.0-cp38-none-win32.whl", hash = "sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e"}, + {file = "rpds_py-0.18.0-cp38-none-win_amd64.whl", hash = "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = 
"sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20"}, + {file = "rpds_py-0.18.0-cp39-none-win32.whl", hash = "sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7"}, + {file = "rpds_py-0.18.0-cp39-none-win_amd64.whl", hash = "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash 
= "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f"}, + {file = "rpds_py-0.18.0.tar.gz", hash = "sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d"}, +] + +[[package]] +name = "scipy" +version = "1.13.0" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scipy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba419578ab343a4e0a77c0ef82f088238a93eef141b2b8017e46149776dfad4d"}, + {file = "scipy-1.13.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:22789b56a999265431c417d462e5b7f2b487e831ca7bef5edeb56efe4c93f86e"}, + {file 
= "scipy-1.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f1432ba070e90d42d7fd836462c50bf98bd08bed0aa616c359eed8a04e3922"}, + {file = "scipy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8434f6f3fa49f631fae84afee424e2483289dfc30a47755b4b4e6b07b2633a4"}, + {file = "scipy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:dcbb9ea49b0167de4167c40eeee6e167caeef11effb0670b554d10b1e693a8b9"}, + {file = "scipy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:1d2f7bb14c178f8b13ebae93f67e42b0a6b0fc50eba1cd8021c9b6e08e8fb1cd"}, + {file = "scipy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fbcf8abaf5aa2dc8d6400566c1a727aed338b5fe880cde64907596a89d576fa"}, + {file = "scipy-1.13.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5e4a756355522eb60fcd61f8372ac2549073c8788f6114449b37e9e8104f15a5"}, + {file = "scipy-1.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5acd8e1dbd8dbe38d0004b1497019b2dbbc3d70691e65d69615f8a7292865d7"}, + {file = "scipy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ff7dad5d24a8045d836671e082a490848e8639cabb3dbdacb29f943a678683d"}, + {file = "scipy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4dca18c3ffee287ddd3bc8f1dabaf45f5305c5afc9f8ab9cbfab855e70b2df5c"}, + {file = "scipy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:a2f471de4d01200718b2b8927f7d76b5d9bde18047ea0fa8bd15c5ba3f26a1d6"}, + {file = "scipy-1.13.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0de696f589681c2802f9090fff730c218f7c51ff49bf252b6a97ec4a5d19e8b"}, + {file = "scipy-1.13.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:b2a3ff461ec4756b7e8e42e1c681077349a038f0686132d623fa404c0bee2551"}, + {file = "scipy-1.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf9fe63e7a4bf01d3645b13ff2aa6dea023d38993f42aaac81a18b1bda7a82a"}, + {file = 
"scipy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e7626dfd91cdea5714f343ce1176b6c4745155d234f1033584154f60ef1ff42"}, + {file = "scipy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:109d391d720fcebf2fbe008621952b08e52907cf4c8c7efc7376822151820820"}, + {file = "scipy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:8930ae3ea371d6b91c203b1032b9600d69c568e537b7988a3073dfe4d4774f21"}, + {file = "scipy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5407708195cb38d70fd2d6bb04b1b9dd5c92297d86e9f9daae1576bd9e06f602"}, + {file = "scipy-1.13.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:ac38c4c92951ac0f729c4c48c9e13eb3675d9986cc0c83943784d7390d540c78"}, + {file = "scipy-1.13.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c74543c4fbeb67af6ce457f6a6a28e5d3739a87f62412e4a16e46f164f0ae5"}, + {file = "scipy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28e286bf9ac422d6beb559bc61312c348ca9b0f0dae0d7c5afde7f722d6ea13d"}, + {file = "scipy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33fde20efc380bd23a78a4d26d59fc8704e9b5fd9b08841693eb46716ba13d86"}, + {file = "scipy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:45c08bec71d3546d606989ba6e7daa6f0992918171e2a6f7fbedfa7361c2de1e"}, + {file = "scipy-1.13.0.tar.gz", hash = "sha256:58569af537ea29d3f78e5abd18398459f195546bb3be23d16677fb26616cc11e"}, +] + +[package.dependencies] +numpy = ">=1.22.4,<2.3" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] +test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", 
"pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "setuptools" version = "69.0.3" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -955,7 +1503,6 @@ testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jar name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -963,11 +1510,123 @@ files = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "statsmodels" +version = "0.14.1" +description = "Statistical computations and models for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "statsmodels-0.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43af9c0b07c9d72f275cf14ea54a481a3f20911f0b443181be4769def258fdeb"}, + {file = "statsmodels-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16975ab6ad505d837ba9aee11f92a8c5b49c4fa1ff45b60fe23780b19e5705e"}, + {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e278fe74da5ed5e06c11a30851eda1af08ef5af6be8507c2c45d2e08f7550dde"}, + {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0564d92cb05b219b4538ed09e77d96658a924a691255e1f7dd23ee338df441b"}, + {file = 
"statsmodels-0.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5385e22e72159a09c099c4fb975f350a9f3afeb57c1efce273b89dcf1fe44c0f"}, + {file = "statsmodels-0.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:0a8aae75a2e08ebd990e5fa394f8e32738b55785cb70798449a3f4207085e667"}, + {file = "statsmodels-0.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b69a63ad6c979a6e4cde11870ffa727c76a318c225a7e509f031fbbdfb4e416a"}, + {file = "statsmodels-0.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7562cb18a90a114f39fab6f1c25b9c7b39d9cd5f433d0044b430ca9d44a8b52c"}, + {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3abaca4b963259a2bf349c7609cfbb0ce64ad5fb3d92d6f08e21453e4890248"}, + {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f727fe697f6406d5f677b67211abe5a55101896abdfacdb3f38410405f6ad8"}, + {file = "statsmodels-0.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6838ac6bdb286daabb5e91af90fd4258f09d0cec9aace78cc441cb2b17df428"}, + {file = "statsmodels-0.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:709bfcef2dbe66f705b17e56d1021abad02243ee1a5d1efdb90f9bad8b06a329"}, + {file = "statsmodels-0.14.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f32a7cd424cf33304a54daee39d32cccf1d0265e652c920adeaeedff6d576457"}, + {file = "statsmodels-0.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f8c30181c084173d662aaf0531867667be2ff1bee103b84feb64f149f792dbd2"}, + {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de2b97413913d52ad6342dece2d653e77f78620013b7705fad291d4e4266ccb"}, + {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3420f88289c593ba2bca33619023059c476674c160733bd7d858564787c83d3"}, + {file = "statsmodels-0.14.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:c008e16096f24f0514e53907890ccac6589a16ad6c81c218f2ee6752fdada555"}, + {file = "statsmodels-0.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:bc0351d279c4e080f0ce638a3d886d312aa29eade96042e3ba0a73771b1abdfb"}, + {file = "statsmodels-0.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf293ada63b2859d95210165ad1dfcd97bd7b994a5266d6fbeb23659d8f0bf68"}, + {file = "statsmodels-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44ca8cb88fa3d3a4ffaff1fb8eb0e98bbf83fc936fcd9b9eedee258ecc76696a"}, + {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d5373d176239993c095b00d06036690a50309a4e00c2da553b65b840f956ae6"}, + {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532dfe899f8b6632cd8caa0b089b403415618f51e840d1817a1e4b97e200c73"}, + {file = "statsmodels-0.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:4fe0a60695952b82139ae8750952786a700292f9e0551d572d7685070944487b"}, + {file = "statsmodels-0.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04293890f153ffe577e60a227bd43babd5f6c1fc50ea56a3ab1862ae85247a95"}, + {file = "statsmodels-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e70a2e93d54d40b2cb6426072acbc04f35501b1ea2569f6786964adde6ca572"}, + {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab3a73d16c0569adbba181ebb967e5baaa74935f6d2efe86ac6fc5857449b07d"}, + {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eefa5bcff335440ee93e28745eab63559a20cd34eea0375c66d96b016de909b3"}, + {file = "statsmodels-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:bc43765710099ca6a942b5ffa1bac7668965052542ba793dd072d26c83453572"}, + {file = "statsmodels-0.14.1.tar.gz", hash = "sha256:2260efdc1ef89f39c670a0bd8151b1d0843567781bcafec6cda0534eb47a94f6"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.22.3,<2", markers = "python_version == 
\"3.10\" and platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""}, + {version = ">=1.18,<2", markers = "python_version != \"3.10\" or platform_system != \"Windows\" or platform_python_implementation == \"PyPy\""}, +] +packaging = ">=21.3" +pandas = ">=1.0,<2.1.0 || >2.1.0" +patsy = ">=0.5.4" +scipy = ">=1.4,<1.9.2 || >1.9.2" + +[package.extras] +build = ["cython (>=0.29.33)"] +develop = ["colorama", "cython (>=0.29.33)", "cython (>=0.29.33,<4.0.0)", "flake8", "isort", "joblib", "matplotlib (>=3)", "oldest-supported-numpy (>=2022.4.18)", "pytest (>=7.3.0)", "pytest-cov", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=8.0,<9.0)"] +docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"] + +[[package]] +name = "streamlit" +version = "1.32.2" +description = "A faster way to build and share data apps" +optional = false +python-versions = ">=3.8, !=3.9.7" +files = [ + {file = "streamlit-1.32.2-py2.py3-none-any.whl", hash = "sha256:a0b8044e76fec364b07be145f8b40dbd8d083e20ebbb189ceb1fa9423f3dedea"}, + {file = "streamlit-1.32.2.tar.gz", hash = "sha256:1258b9cbc3ff957bf7d09b1bfc85cedc308f1065b30748545295a9af8d5577ab"}, +] + +[package.dependencies] +altair = ">=4.0,<6" +blinker = ">=1.0.0,<2" +cachetools = ">=4.0,<6" +click = ">=7.0,<9" +gitpython = ">=3.0.7,<3.1.19 || >3.1.19,<4" +numpy = ">=1.19.3,<2" +packaging = ">=16.8,<24" +pandas = ">=1.3.0,<3" +pillow = ">=7.1.0,<11" +protobuf = ">=3.20,<5" +pyarrow = ">=7.0" +pydeck = ">=0.8.0b4,<1" +requests = ">=2.27,<3" +rich = ">=10.14.0,<14" +tenacity = ">=8.1.0,<9" +toml = ">=0.10.1,<2" +tornado = ">=6.0.3,<7" +typing-extensions = ">=4.3.0,<5" +watchdog = {version = ">=2.1.5", markers = "platform_system != \"Darwin\""} + +[package.extras] +snowflake = ["snowflake-connector-python (>=2.8.0)", "snowflake-snowpark-python (>=0.9.0)"] + +[[package]] +name = "streamlit-dynamic-filters" +version = "0.1.6" +description = 
"Dynamic multiselect filters for Streamlit" +optional = false +python-versions = "*" +files = [ + {file = "streamlit_dynamic_filters-0.1.6-py3-none-any.whl", hash = "sha256:882f213dd3b846704a894c8e31271f0401775334f979a9e4e492a85035179d56"}, + {file = "streamlit_dynamic_filters-0.1.6.tar.gz", hash = "sha256:3d4f53007bf281c846477a2d9f202e61bb97c19c5c43d3dadab75019133c28f2"}, +] + +[package.dependencies] +streamlit = "*" + [[package]] name = "tenacity" version = "8.2.3" description = "Retry code until it succeeds" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -978,11 +1637,21 @@ files = [ [package.extras] doc = ["reno", "sphinx", "tornado (>=4.5)"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -990,11 +1659,41 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "toolz" +version = "0.12.1" +description = "List processing tools and functional utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "toolz-0.12.1-py3-none-any.whl", hash = "sha256:d22731364c07d72eea0a0ad45bafb2c2937ab6fd38a3507bf55eae8744aa7d85"}, + {file = "toolz-0.12.1.tar.gz", hash = "sha256:ecca342664893f177a13dac0e6b41cbd8ac25a358e5f215316d43e2100224f4d"}, +] + +[[package]] +name = "tornado" +version = "6.4" +description = "Tornado is a Python web framework and asynchronous networking library, originally 
developed at FriendFeed." +optional = false +python-versions = ">= 3.8" +files = [ + {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"}, + {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"}, + {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"}, + {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"}, + {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"}, +] + [[package]] name = "tox" version = "4.12.1" description = "tox is a generic virtualenv management and test command line tool" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1022,7 +1721,6 @@ testing = ["build[virtualenv] 
(>=1.0.3)", "covdefaults (>=2.3)", "detect-test-po name = "typing-extensions" version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1034,7 +1732,6 @@ files = [ name = "tzdata" version = "2024.1" description = "Provider of IANA time zone data" -category = "main" optional = false python-versions = ">=2" files = [ @@ -1046,7 +1743,6 @@ files = [ name = "urllib3" version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1064,7 +1760,6 @@ zstd = ["zstandard (>=0.18.0)"] name = "virtualenv" version = "20.25.0" description = "Virtual Python Environment builder" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1081,11 +1776,51 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +[[package]] +name = "watchdog" +version = "4.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, + 
{file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, + {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, + {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, + {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, + {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, + {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, + {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, + {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + [[package]] name = "werkzeug" version = "3.0.1" description = "The comprehensive WSGI web application library." 
-category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1099,11 +1834,21 @@ MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] +[[package]] +name = "xyzservices" +version = "2023.10.1" +description = "Source of XYZ tiles providers" +optional = false +python-versions = ">=3.8" +files = [ + {file = "xyzservices-2023.10.1-py3-none-any.whl", hash = "sha256:6a4c38d3a9f89d3e77153eff9414b36a8ee0850c9e8b85796fd1b2a85b8dfd68"}, + {file = "xyzservices-2023.10.1.tar.gz", hash = "sha256:091229269043bc8258042edbedad4fcb44684b0473ede027b5672ad40dc9fa02"}, +] + [[package]] name = "zipp" version = "3.18.1" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1118,4 +1863,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "a604d3b769ffc5079bf789d1557a112f77a5fbf91071732ab27de41caf356da8" +content-hash = "d12b7177519b2078c58a3688cb984d42a5e65a8d6ff60a9ad83343b43641a566" diff --git a/pyproject.toml b/pyproject.toml index b852b15..0101c1f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,6 +19,10 @@ python = "^3.10" pandas = "^2.2.1" dash = "^2.16.1" duckdb = "^0.10.1" +folium = "^0.16.0" +streamlit = "^1.32.2" +plotly-express = "^0.4.1" +streamlit-dynamic-filters = "^0.1.6" [tool.poetry.group.dev.dependencies] pre-commit = "^2.20.0" From bedb247d876c3c09d7aee15a7ecaab0d7548730c Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Thu, 4 Apr 2024 10:51:22 +0200 Subject: [PATCH 007/147] modif pour filtre top dechets --- dashboards/app/pages/data.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 8f057dd..bffe624 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -270,9 +270,11 @@ with tab2: # Préparation des datas 
pour l'onglet 2 df_top = df_nb_dechet.copy() - df_top_data_releves = df_other.copy() + df_top_data_releves = df_other_filtre.copy() + # Filtration des données pour nb_dechets + df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement - df_dechets_groupe = df_top[df_top["type_regroupement"].isin(["GROUPE"])] + df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] # Group by 'categorie', sum 'nb_dechet', et top 10 df_top10_dechets = ( df_dechets_groupe.groupby("categorie") @@ -293,8 +295,10 @@ title="Top 10 dechets ramassés", ) - # Amélioration du graphique pour le rendre plus agréable à regarder - fig.update_traces(texttemplate="%{text:.2s}", textposition="outside") + # Amélioration du visuel du graphique + fig.update_traces( + #texttemplate="%{text:.2f}", + textposition="outside") fig.update_layout( width=1400, height=900, From 548350f0787ea53161c16990a6f04ec4ac5784a2 Mon Sep 17 00:00:00 2001 From: Data_is_beautiful_ Date: Fri, 5 Apr 2024 10:23:40 +0200 Subject: [PATCH 008/147] clear style.css --- dashboards/app/pages/style.css | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/dashboards/app/pages/style.css b/dashboards/app/pages/style.css index e1ce9cd..e69de29 100644 --- a/dashboards/app/pages/style.css +++ b/dashboards/app/pages/style.css @@ -1,12 +0,0 @@ - -/* Card */ -div.css-1r6slb0.e1tzin5v2 { - background-color: #FFFFFF; - border: 1px solid #CCCCCC; - padding: 5% 5% 5% 10%; - border-radius: 5px; - - border-left: 0.5rem solid #1951A0 !important; - box-shadow: 0 0.15rem 1.75rem 0 rgba(58, 59, 69, 0.15) !important; - -} \ No newline at end of file From 7dbdb0ef3e78183de63742fa33634592ff3ff16e Mon Sep 17 00:00:00 2001 From: "F.Hakimi" Date: Sat, 6 Apr 2024 16:30:19 +0200 Subject: [PATCH 009/147] requirements minor edit + chiffres cles top dechets, echelle log --- dashboards/app/pages/data.py | 53 ++++++++++++++++++++++++++------- 
dashboards/app/requirements.txt | 1 + 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index bffe624..f150fa4 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -1,7 +1,9 @@ import streamlit as st -import altair as alt + +# import altair as alt import pandas as pd -import duckdb + +# import duckdb import plotly.express as px import folium from folium import IFrame @@ -100,9 +102,12 @@ volume_total = df_volume["VOLUME_TOTAL"].sum() poids_total = df_volume["POIDS_TOTAL"].sum() volume_total_categorise = df_volume[cols_volume].sum().sum() - pct_volume_cateforise = volume_total_categorise / volume_total + pct_volume_categorise = volume_total_categorise / volume_total nb_collectes = len(df_volume) + # estimation du poids categorisée en utilisant pct_volume_categorise + poids_total_categorise = round(poids_total * pct_volume_categorise) + # Dépivotage du tableau pour avoir une base de données exploitable df_volume = df_volume.melt( id_vars=cols_identifiers, @@ -156,6 +161,7 @@ # 2ème métrique : poids cell2 = l1_col2.container(border=True) poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric("Poids total collecté", f"{poids_total} kg") # 3ème métrique : nombre de relevés @@ -212,7 +218,7 @@ st.write("") st.caption( - f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_cateforise:.0%} du volume total collecté." + f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." 
) st.divider() @@ -268,8 +274,36 @@ # Onglet 2 : Top Déchets with tab2: + + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + + volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") + cell1.metric("Volume de déchets catégorisés", f"{volume_total_categorise} litres") + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + poids_total_categorise = f"{poids_total_categorise:,.0f}".replace(",", " ") + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric( + "Poids total de déchets categorisés (estimation)", + f"{poids_total_categorise} kg", + ) + + # 3ème métrique : nombre de relevés + cell3 = l1_col3.container(border=True) + # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") + cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + + # Ligne 2 : graphique top déchets + # Préparation des datas pour l'onglet 2 df_top = df_nb_dechet.copy() + df_top_data_releves = df_other_filtre.copy() # Filtration des données pour nb_dechets df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") @@ -294,11 +328,12 @@ labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, title="Top 10 dechets ramassés", ) - + fig.update_layout(yaxis_type="log") # Amélioration du visuel du graphique fig.update_traces( - #texttemplate="%{text:.2f}", - textposition="outside") + # texttemplate="%{text:.2f}", + textposition="outside" + ) fig.update_layout( width=1400, height=900, @@ -395,9 +430,7 @@ # Affichage de la carte Folium dans Streamlit st_folium = st.components.v1.html st_folium( - folium.Figure().add_child(map_paca).render() - # , width=1400 - , + 
folium.Figure().add_child(map_paca).render(), # , width=1400 height=1000, ) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 77eceb6..af5ff81 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -3,3 +3,4 @@ duckdb==0.10.0 streamlit==1.32.2 folium==0.15.1 plotly==5.19.0 +streamlit-dynamic-filters==0.1.6 From c69573ed272fa190229b45cd2dface40d057e3f7 Mon Sep 17 00:00:00 2001 From: "F.Hakimi" Date: Mon, 8 Apr 2024 14:58:40 +0200 Subject: [PATCH 010/147] V0 sous onglet secteurs et marques + minor edits --- dashboards/app/pages/data.py | 111 ++++++++++++++++++++++++++++++++++- 1 file changed, 110 insertions(+), 1 deletion(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index f150fa4..be6c288 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -290,7 +290,7 @@ poids_total_categorise = f"{poids_total_categorise:,.0f}".replace(",", " ") # poids_total = f"{poids_total:,.0f}".replace(",", " ") cell2.metric( - "Poids total de déchets categorisés (estimation)", + "Poids estimé de déchets categorisés", f"{poids_total_categorise} kg", ) @@ -438,6 +438,115 @@ # Onglet 3 : Secteurs et marques with tab3: st.write("") + + # Préparation des données + df_dechet_copy = df_nb_dechet.copy() + + df_filtre_copy = df_other_filtre.copy() + # Filtration des données pour nb_dechets + df_init = pd.merge(df_dechet_copy, df_filtre_copy, on="ID_RELEVE", how="inner") + + # Data pour le plot secteur + secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] + top_secteur_df = ( + secteur_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) + ) + top_secteur_df = top_secteur_df.reset_index() + top_secteur_df.columns = ["Secteur", "Nombre de déchets"] + + # Data pour le plot marque + marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] + top_marque_df = ( + marque_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) + 
) + top_marque_df = top_marque_df.reset_index() + top_marque_df.columns = ["Marque", "Nombre de déchets"] + + # Chiffres clés + nb_dechet_secteur = secteur_df["nb_dechet"].sum() + nb_secteurs = len(top_secteur_df["Secteur"].unique()) + + nb_dechet_marque = marque_df["nb_dechet"].sum() + nb_marques = len(top_marque_df["Marque"].unique()) + + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + + l1_col1, l1_col2 = st.columns(2) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + + nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") + cell1.metric( + "Nombre de déchets catégorisés par secteur", f"{nb_dechet_secteur} dechets" + ) + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + nb_secteurs = f"{nb_secteurs:,.0f}".replace(",", " ") + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric( + "Nombre de secteurs identifiés lors des collectes", + f"{nb_secteurs} secteurs", + ) + + fig_secteur = px.bar( + top_secteur_df.tail(10), + x="Nombre de déchets", + y="Secteur", + title="Top 10 des secteurs les plus ramassés", + orientation="h", + ) + # add log scale to x axis + fig_secteur.update_layout(xaxis_type="log") + fig_secteur.update_traces( + # texttemplate="%{text:.2f}", + textposition="outside" + ) + fig_secteur.update_layout( + width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" + ) + + st.plotly_chart(fig_secteur, use_container_width=False) + + l1_col1, l1_col2 = st.columns(2) + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + + nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") + cell1.metric( + "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" + ) + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) 
+ nb_marques = f"{nb_marques:,.0f}".replace(",", " ") + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric( + "Nombre de marques identifiés lors des collectes", + f"{nb_marques} marques", + ) + fig_marque = px.bar( + top_marque_df.tail(10), + x="Nombre de déchets", + y="Marque", + title="Top 10 des marques les plus ramassées", + orientation="h", + ) + # add log scale to x axis + fig_marque.update_layout(xaxis_type="log") + fig_marque.update_traces( + # texttemplate="%{text:.2f}", + textposition="outside" + ) + + fig_marque.update_layout( + width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" + ) + st.plotly_chart(fig_marque, use_container_width=False) + + # st.markdown( # """## Quels sont les secteurs, filières et marques les plus représentés ? # """ From 423a1ec9babf5e12187c093e2f1b469a194355da Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Wed, 10 Apr 2024 13:13:54 +0200 Subject: [PATCH 011/147] petite modif --- .../data/Correspondance_Dechet_Materiaux.xlsx | Bin 0 -> 337523 bytes dashboards/app/pages/data.py | 123 +++++++++++++----- 2 files changed, 90 insertions(+), 33 deletions(-) create mode 100644 Exploration_visualisation/data/Correspondance_Dechet_Materiaux.xlsx diff --git a/Exploration_visualisation/data/Correspondance_Dechet_Materiaux.xlsx b/Exploration_visualisation/data/Correspondance_Dechet_Materiaux.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..5de3121e6b4242dc8bbda44d5985e4d5b613b6b2 GIT binary patch literal 337523 zcmeEsg;yKjx;8Gs-K|)V;$GaHP!cE5AWXZBYS2*YRD+W2xtfx2nYxa2z|W=%%O+~2uXVFC?WM-Y-NrE;XQT9DRHlWIh65u-`QPw&1z@5cu&Ef6rM9co2}Asrdd=rl$&0 zHWkB_I3rzs<`}Yyzo`L_m4Kxsa$cO$yk5`3k5Db1w-0f|3_q1rCAbSLB?U^D*q zIerV{y=6@Z4#2d2G=2sbjYLOo^eBcnwMfWM>Wrbf(XWQ};I4{vX&n(!=dww%eMuKI z3=ccz4fMhFk%CA=Ti;mpK>Q`f}R%HEBe>+kmeDEq&dr~grUb&7^o4-a<4iSlFQ zz|HJR627dOmyB{N12`~PWf`wAzL<$>rIVE!A50mBtQ6E9_&Bt*A|8J@z;L}OP#sH1 z{EG3L*XM}LdlwIMb{5wRMVIQ0UINeAtJ&)hN~%6U&-O(2s;2TnwZSz;#pyHoulVC! 
zdX$(%CDak55+6bgerV{gn%~wS&dTZ>eUA9@UAW*dbv!3%KBM#iM=DBG{rA)dk^wjK z_X{=t1NID8ca#tvYf+mo=6SASFMLca?Yl1Ib2^Eh{9kJ34!nLLKzPeHuGG(5a1~_G zAaFMP(Wh4!v!`m{bTBlQu6*@>B+1^`hf7fu1O!oR1OzHO~Z&6=j3s9)~R9V1e3+)etMC}@?9c$`Yiok|BiMfHeVhB>0#b-@`bqsS&N z!wU@ooq3vVzhOo6ndY=iQsFy&Cxi9KGUQ_7b(i6Lsdew?JXS!Gzk zS(D*0WrScEoBcc~OVOTslx1ToAp`zfxAnD&4fDASQ=4(Hs$9l}KbIzgL6!2!ozs4{ z+3#@EcDk^wB+mF`=5*2JI`gqt`l4?+_b5gtf+$82Y|;%}7`#v9>%T9n@BDVxByxSL zLblPj2`J;!i4G+k=>ryp$_JDn_xLM#vb_09PAU)rsh<{Vjq&`Jev#FU;3}nzVn#vQ zJIs&xdI|IgqM!cLI5#Y6Qt-|gq*&l7ExV3u=(4{_L1#fw9LgNf=T;_6EuL!}SJSZ^ z_+bvVH_dCEH*omE|FC;YZVbphjG*x68Cd~%+fWErT71f2j zJ10hs6|sJ|+!<`>-={W!y)Cs!d@LH?;rEAeUqsQ=gJ#xEEYH?wSD6Oauj9~T$zl4J zM}(ZzEv7L~KaQJWly|B_^nxCz^3m!Pv++Df#4)LW@3%j4YmZ=`AeV~yKvgX(lhzW; zzI+dcNb3NldZ5V_$?QZSx8xD7-p(dXygW9{Hb-biOYnxZ5RaEfNL@{Hj4do{KoUa- z04pp|Md@R(xk;yK2~-dc&S&R-<=%+4R@aVna-L9E#}tv3YCxi_5W2x!ipgh> zT%Ac}CAnx-OqNuhrAQE>2{RlF`=}IwJ!BV!gHd&s;B!iouWW-9-Rbg8T&C1fwVsbS zCG<%QdcEYV-;riIk@npb-&&*z_uis?2M^tos_C7#w0&*&0`~?B??#g;s*+kRF^Xf? zXG~q1O7vz?tBP5hxPjbvOevpnQWnV_Ui)=w)?IE(7StKty;-N&Ht=l2XDxwQzUg*N ze|_hDUBMQq71V(Cvd7`>^S2d=SxU532OQTg?~kMx?S_Rvf7|env$E!?y}OgWZaj0M z+Y_?>Be`?kXC?_`;YV;K0N#V%STXm4`}eY}JCZ_u71st@5XV1$v>3cZzv_;@?g@Z>Af!vEm&XHU*;1RY90WLY2Fh#oq0H z1-)V*<^}_XGGv36VJD|=PCr90a;oip)%?kUtX`Jenekb`gSXW%ldgjtQu>!x(cO$u4jwi8xF&iaprrltp{?&nmBJXd1hkuxi%e&y&%Mk*up8ed1Lro$O&g znV)#c{X^h)ZQ@M({lB&>L&P1dKTOJIaLSOCPWJ3xlwGM*7LQTz)AFQ=$u#rMvSYYs z^UE}HbLhf`jM-&y9wCv^dNGsLh|w?T&B)P1&j}l`IRqo*j&H=R)+`OMbh9($zlKsy z)Vdkl^s`Lyx`$R;7-_s_V=wr57Mi0b7bBirlZRUZcGv4_9v3O9@X|eHKFw(n(0F7? 
zU9U(nIF82~k*T`7z8yBDm}q$4BJl@ENsSuQ?y}UyL2{SHB>t;;Al?36rml&#e&=;n z_Iu2G8qa*pH}-TJOyT<0;@_{)9p*-J#j8dQ=1SL@{dDtcPz8RULrKol=*effPGNY> zUqejmaeWPcn91Agi}zh)E^f|x#`f^kwPtHbiM~1rIa&Cys^ZgtrB&b|EwcRZb3|Z& zL3^}gXp?MQ9XAsl#mZjm7YsWznNC3jV8QoG&t&`Hl^wH1RB5Bo>q`O-?sQa)urk)B z_6(l*%jKUsnX%Z`Z;{H1dcG^~n}?zNN=lZ>Q8+VSegDjgFE)o@riY%JFih?x{RJ1p zqnXiV6|w8tgvFa*kg(gctEBTyuiNA5LIw@KJ$i0D@*?i{c!aXBlw^aV7++a@l#bf&R0H$UPSIUAW> zJ{@GtOlf!e(g>U9AFrH>v;_qfDk1_cS=g?JMYYvQrGZ#LdYRKqsps*ar}@sc=d+Wb z$E(bsr|~}z)184&laII4bboH>{yZt&uKlrnFZFysER{dkDKXmlob`M=-5K`8>b#`8+d-(wvBZKhoN^HzS}ya5J7@n)1v`am2Paf#8Dj>-*j!Me1Aq48bA` zx+==ExAG%Ke${rp$pkn8htUiU{9!49ne@QlM#N~H<4WP2kK7ni6Z+EYWGxsNkMerj zWVi8>s3?J-a)P^R-O2E++wzf+G;Z*ZusWyp!W(K17T+052Vr)nGjbMy!zD&N!bf#M zsqtOah3s#kAtv`4X6t3oez3%_=2}<0-)8Dl3>K9z^_ZHedR+p{?;Y(Knq)XOb z{D_>e$1xO3=*lHzkr+C$_SQ2FPN5^Hzde3D%@i$*t%$;+tTF5*B!7yYZ8?(|MXK~5 zgX9Nq)vo?!FSX+wT*NvQJ@5c`Wi2M@pL9hJp9J4y-RQLza zAQ4L{HIAPE^~6MlYtjJ0R&?7NX=!RDQ%%g&XgJV@6)N>Z6gi`-L|yK5<1$I{M&+UY ztGix<72`Fx_XF6k%n@a-#feKJi#PT_)B)Lc30$lbqK?F9ffp|=NbRuQu>Z}ycP7I&^5~w$SyT1n68KO_?rqEqWBXqg2LkzXU5K|Udl&# zZ7otzsIx<#&iF&e2dclKZAwZ04qiZk;M1bVWC^O;X#902}QCaF&jy_CWfP12$ z>!fqq=vCeCLDwV_q!!Og?|q&?e)X*P+w5E{O)M{@%~;|Wz7)#xkr_UnoKR3SY@MKe z*kSP%RudFg@4ist`Xh?mWi7T>CHkFy7(xN|ng3Ti|aC(msho^5umy~}T# zU5VeuHl9+WW|KVODrTnqO1^f)4dZj5VVF~Wc+mZwhS$+R9qTZ|+e%-?ZpfXobI&cp z9<^xbqRNW;#6})ANsm6!GQ9&slXH8)$b$6rNde+@{zc!2Q=K7%ZQvws@R8PK-pDj+ z4IpRKh|)c9o4(|Pz1QPZS(jRNC%7jFh#2@>NWC{o$i6u4(_*KTA)$}Ez&GFnk#t?< zA=%@V69ItsX9|BwDS%(~`hlSywq)dj+PPoLyO||3-g`pPuR zTI_1ibaNs8XsE;7R(mSG{<|K`sT5ZEi`eg~RaRTtTCeLm(modz8)i}hQ9|w0L4G}C zGZW#DMJ<02D=NZl|iQ=WM$AQ~v! 
z4#0%Mcx{*D!6bz5C~+}coce420v|w>%f&#-sier_Nv>fm7yc(Q#VWGSMhW}yJXEPDfPz2-XRO$73l zJAVu)``8+@zb+!Zw$-jk{(^eUq0A%Pr&-!9Dzx_9VmePF(ws8){je(N%TNCMuAH-6 z*@DG)DoaBz?44^I8f{lBUwt_2Lm{zorPq}Q;nwE!D z*t+b$NoMwHif=xtk){?~@bSr`81tuHByndjDOph(70V;ciOB`Fw^LdW1ImJySc)on zf2j9ZVE=p0M=ovSv^@&G@PY(?Omt*eRLbMz^ekVcnTA=!rSJEX^Uv~IP>V~dGI#Vv z4nB^LT1x8gNm1LK7pG%le5e^bsM7bTF{jX%|ICZt#&J`hA*9c)_wuOzlbApA4P#_- znPOQegRNI>s7zr7kV&e)XfL}qKP<~egnYk)rggFYz_e7ayWmi|+mwFXl^7$T0^rfo zC~qd(jV9MUbWvzpnw`+V$;`ee)WDgp&5SVMu0aw&g3xTvj395MZ>;<519)M$p*t6O zDX){XTv{9vSyor}0!G}qZm;~=jtSf9&LQ-!cj%Z}vl<(Gy!5q7A_uomAoop$DhNbA zfc~bw>tkX+*penvjrbT_C|AFauxeyTU9uhk4qIMl^r~bh zgXeGC@lrWnoc`cSl)m7QO7b_!37IkBy{=WGLX1oTY{oP2yRwpkzKGcv|eidq^)ycA)BCa4sLavSNpU%ZL z6JyyHRXN4Xk5i`RzE;BkZ!vWKcGXNU4UiohtO1zQ**{OLAoOr*2+iGv&oE|c`aV&q zb>)2795o5rg}!F}Zu!6$LK4ds14tF0D7$N)Dmbd;JmoWj3wWSRarSp1yVzD^`EsR2 z(Pp!tQb)d^>hg03Ro@ZG@fv?xVvHqa9H7IgzZ5>mbq1fd2E|OlOdt)fWp%%qFC>?5 z%223cV!~at%}Qem9j4bwf1~6Cop{>1AZc^?nKv|HLri@dbc7sLDPy$s_7|@v}-umIz71aJDR|NXKMp+qyU&6OVtM_l*mUG}$2^D&} zBXVP)>#VxXXQ8|EX!Rx26A#rdO(;b-NTljxrH4zKi#DeT3*bsS<+>I5q#yApZ+8xQ zpc?a*lxg=(HQ#w}R;B~;21WF5MbR$TDgCb+boKiy*VY46Q`r%UEHRtW9j`DhByoxS z3*GI?*%3D5dklpC#c6c?m$WayG}ci(2iFjUtXeUIQ|PWvS^JOnej<5hx2^rZAntC_4?#Ozv zI*0h>$BHKAF1(7mtQt(gnbe?+JvPSRX3Cb6ykja{1i!-#go3&IN-9y-ICyn4hcg)X zf^B+?!Ll1weM@`qsFD^_Y(x2ED+SE9rrl(WO_2;ogMa@I}W zrgY5{54j@BNX}%-7Z43x^OCXEC(<^m;n=&cN`!K0^iF(UmApXzVA6bea0PK0@lVyUC>tO7KAd+4SB7*G z^=P*Mz{Tk7X`Hk$RW<#i`sj7&EjYTirQL+^4_iY8mlVLcw#7fh_AEHc!OlEyDF_bH zTlOy%M9vY@r`HHcKm55#yJv9@A;U#JNNu9}Z#uzY^6y zD1=fWhqQEFPPF~qnwUZ}TZvt5^I08+{h)wetxTG;1$T|oZVIOgEtco3hhu;||LS&| zK|q0qf+3W9LG`I2h`p|;GheafS|O+w^pf_tQ4p-~upjsaee1!~9`0OSrCcA-h(4kq zEn37#;d-imky(8E3cKXCG2eJmNvpj9K3>=G@zU;j3aHR3Y;UIgeX`@`9FcF@^CY3S zsdj(D8kmS6PQ#4A0rvh%`QWE>Zdha?+(9q7@?GtQ@{P}TM#oX5%RsSlr{b7ntQ@`*@m$kTyz#V18N82}e zXH|qA5!|sBoA5e%zYllxvrf;(_)c!^iDlBR8RFdB)_HumMXQA*B|jW~c|^N zv_uE?; z;9*wl=lFBkeu?iBFrxkN3bnGYy=kce)2?G)c$(+o;9tq&@BBn8IH5)a_VZ)lxvKm_ 
z<5=*%S!cRPu+Iq_?u{5Z_CcZ_4dKYNlx|T`=WJEB)p>}?4uKj%T+4;AaDs-h{1kWF z+0u<+JoiDT6u`H4`2fhPSGY)8xAwWPlGijYYZ%VkhWFDU8b*9LN;1r!OY=+nte&>9 zejqHbfqHz4OvAG{Vz-vdrd;n{k$Wzr=Bw**94Z{A{xU4*{$%K@q$o_aC%L^jHu~EPS+}okdoA1(2lX`k!~f|9l|+BN)w2D^9*|IT9~ofmpzdT{|2srs zG69OjPSZBK$QsyY!LTBdyYlw4EP9^K=mLt=a-YX-ij=I>X#RO-kxyEmamfwRtmavK ze}#&@WIKGCM*nk~Y`CBb&+q1eKiNn`vsP2@Z;jVT>p5gDye+s#kLt=qKh#tLOGP1P z8OMGyZmlj^ySi0m0%cR8U zQL)#UU@fgO9s=>}F6T+f?g(pRo{B2}TBlfn{H^0Zcybh@$vJ!O3zNATdJy8t+$wJ! zoUV2sS)3_79-S(Z@T^N_%cGH#ksl*ylp_45gc%DdK;7|6k+y`GIP` z`9sg=*>xqh#6dC?WR_o;LIbXL=r^>w0XZMdMf!T;rCu9ZxULF#S8s|UicebD1ZuMs zqD^1CF3J5C*ZkKhcYPc~r}js)#~jz;MP^W%`X*w$qGE=@+ajN0g|gDT#m2hlNi_D$ z-(ZQH(_%^auEqO=pPDM(0DKO4zsE0L^(o@uw?AAcNQ52Ez#MY==vw)68krI&ce8IH z#9pVIu+oQ!i!3nun&YF3(iv5FE&V*-crjRDTy>{*D!*TxgnPC}iTO7Kj~jG^J$?0| z!j{WO>mNeD@@)Lu@gF&p&dDnXr@kHzlAY3OF;!2VQsLR0?$G@JC>V6bXCZdD)Z4H!hI-*pnxzHsr*y z)GT8<;NLiKX5Ad?(!0ODJ!{*p8nZYZnbQfy+2z&UMA&V=wur_FIQ!jW`r`pjitYoK z-)cXMV>QPn>!x{%`7^~oLH*U<>PoJjE!j~8r^9P+?!dN?>Rq;XY^zGizaS@EpqG+E zp6^0GoUDurjQIU~LHIAV;`sQbkKroIAMxaDCmDrmyFFcG-~nLKR_5gH|HkEq-^k7Q zLET<}yWyE}Ll<%6_wjQ}=xyb}sdu?wWiQXgXu6YeU{J=dQ|6$|XWC9BlaKLJdL)$} zH)yHkAQI2y0qOr`R)6QOBk8*LVQWxt^ieeLCf_t{g!RK|d&f({0F8WgLGY$%M~kcG zyu_rwmPYvoZr;@lJfo6Cz4CT@4&ALT9il7D-<7aEv`@>f1ic*l{5_)5T2fJC=f4O5 zS3(=EaB)EgLGlS|ydf0@+$sO?Y=Z|C8u4%Ek=?#TzB+FuP6pjIG$VY*T@T^XaUc^x z+ku+zC~1eODU@V=lOlO`@rJ5>skIjt_?2(a9bm; z)ZbPSUXc!Mqr zO;4u%*Jpfh)i>1!dwx^UgI2-q|5MNrb&lJ^9y#p*(b}5m_a^ zrwjX4b-lG9@1eJ>97w}_ofHFqCN`&iia}*@s(4qg^j8~Mm(^^5wFWVX+g4!q6WNR^ zU?Og6iNUJkvIiOYy$}?(j^+zJxWnIWQ+oIG?azJji#uv5i6-8(lhbU`8dLj5?)~6Z zlo%B*qe9%4;qpt4<3U$?HCB$`Z6T0?@`~eg?XxX;!tgbgqVvuv?$<#w zCEUQg?5zE}J#CJv`KwdNLA)Yj@;6G<!HA0qg?s|GUD^$L*jldxvu^l1NhRZM z5^r$|wLSqL?2WR*DI}K4b>tsVX6%$3LbN(@aoPp(K9R0kzuXH589E2G8W$z%$|*#g z;^6zGq3|qi(~K_}{ZJ?|T0Omq|L9#=LrKdG&e0O*?=H7qLk(*r)L&*q#mi{u-o zAJe>i_zg6}wLGvi>;1Lg@{ZG?n^&)=Tt|__nDqoYU(_agC_uN%c23!O;XS4&QG2G| z6)V3%no{Mp|AGMT+F5!cDn5#65q#8!e|< 
zej^7&iJHT<3SSLq$y#eoJpJaYyu7c-V@8^osyKIv4K-u8)jB;|E0*7Q)mRqCj)?yY zEZOv2BeB6sibH}Nyc#m3XM~`B`f)T?Q3lG2mT1q(sY`{Ie;-@7wFg7tFi)WY9uQ z!x@x;8x<{gog-;G0T@5eUfFrr13O9So<@>SBFck9ZCSY+{hXqib7n}_I#N$g7tdtz z?k+N(m-FwseA!EPy@T$_sfwuz^h7V8qAmSqr3?OWQ7d%@-*ZD>XKhkPA|a`7OwmfD z+f~e9=Y`!Op7p)TJPde1hG+EJo4^8 z*42M$r1i;@1s3ElSxG_n4{BuH8{8}1U--5Sr+QW z=+UiuCLvbJPX=Wuip|EK{Q4|Bi!1%q+Qsv^*?p0%_VMy+Ut;JmS8$rSL%KDY9wYKI zO1IMmi}Z+xHnSn#Pb($p?}Kl$o^T}Z7k+J3hhGom#&^rjU(qW24Hd}VY`0E)RC`<0 zc4n4PZPB0TzFReT0Oi!vHpw%OLGO3sURD%o!NQ)-!P+v;;eGaFgeZ0F6F@qTJbu*m zGJ=H(Q3jWELkq^eiRd@KyQ6R}TY+|k_2KjYpA9(mU|+lq&hW2x96aZuK@5|^ zp6NO9Z)D<9TV@JMrk~NCEw)Ik;UPw4q=sLLFOoI|0@CPiaAw2+zTg?us&anuDF;TG zrt23nO}8(ksZlFc@!eW~m&+qYSBAqeh*7I6#+23glqA9LL{=oMK+#CA!6l3X07>jL zFXEZnzK<6>%cH3~>_df)#&f7>7dc29ZSHZ!`Gnzq=>9}={>RCxWGYVXRfX=Fhl<5K zPLn`&LzEb{&$dvGnbzMG&e=0MF!xIT%?5nVgY6&1KIfktF+7IKVYYrH#^SO}B~6$VX( z&ynSsMsU`6k!n-d*Wm=fM{RZq{8F7hqL2EmTTbCQ#kOAwG6gj$zys=)EZuiTrCdZdNZ55$x<4R zw6W~OAtUe1m4L}r8J=2e#Pu0J*If(dUX;)Pa?FZpCAxi?00<|_(m^8$O+93z*~l`Fk#5S*}c)}9y;6=`2FW` zFaP=KS7N6G^XJo54`!r|?^MEs@}l3)#rU|7<1kTF)>84lOK_^yme;(= z+IQ%?5z!oW?I-nq=M3DkCRn3B5MP6yBx*Mid-K>pWb=SgHT}VGu(u3ZU+j#2#F^pK z#*4{GY6y0}gH{09$_h?h+2fslj3RlBUdi-Pp;gt`^4+Wwvl`2DQxrT?UhQIk z-2Ul9@{a1>Vc4nDD|NO5C*KgcBU$SAM(oN3OkbFq)6IZeiQ8}rf z<8|xAYmy6`Bp4>>DfHx7@N(E{ReJ(97qkn-+0y^YguIU~dzrcqBJ6@iQ#X6qVBkFO zCA2p5{A+{cP+3*Nxtrmeuda_QpV=Il+hc}!c4kw1%ugM{i(9)U{0JC%@YYHNcQ|;b zwezfcDBk7flpt+Pk`jiqzQw@TF-}KA{01lRj>^DD)-C0}Z{6dCJIxsP{WG$rd*wS~ zoa*+kj!p@W!0wg+v~{C3^n} zUYUvS=!xp4`y(m0W6f?7yL|>B>c~%9;tm`pkh+ULZ3kYO1W7{b2)y;uwW!}WIz;bj z(vBwYdDp|9@Zhn7iS?t7Z_yS9wpGq&XZ`zJ?p7Q{j$CqM0YF)4k?b$bq|Ma4-z-jH z-`cV8Z+4cS?Z6Inu;f37STZ-k2xen|1|ojQMe|~rJ7b!Z`g3sq49MS?|kx~EfL~~>WW3t zPUJ>WTb6rP(eg+gM_5@f3IR52?;umqbv(BCooK6Pa%u??`D6MOYlC_|D{d?iBlRuB zSChEI;HEM`?J)z7$q-48lj;>ge{ZNy;%Ht++T4zr&mTn|$khu_t8;j|D!iKgLTxQZd> z*Hs1^%g&h+B-g2*;B}>I{eEx8-2mu4n-rh@-LJpJRt$Jh0jKAIHt%`BbPvUPotHn= z_i-kF_w-{V?CWXMz3a4P^v}Eyf=wGqO8DN30$Me^hxah7N%4n zOutm1BqQfnN9zrw3R;&t2!sif=h75VX 
zUCRz=$&P8x(z&6H07f)((FTNagO(M&;X$K4s*rX$VENL|=Vo}9w*7TTm+M|P|0UrNuUV~qgZj+Wq(6;haIXdboOBc-TuLHpICZ|EKWTSP%& zN+?y3Ai(PxKs;?YYm3wQz~#p-z}o!}lr`73WT|MFEMACOnr`lrrF6!Py-Two4eZ{m zJn!h}&UObm9T+dmraBub)HH1YFKTA6suZfAx?vr`Z6L{1UYmMCV5(Q`Ip(ahsu&ve zFhr!CFQXo?fx$Y`YFGmECp5*Jjz%vuT;YM(hTWgIVDH0CGKdvXR}41IgKarQd~-5c zmERGe6*UvrVoAq^2xB{j%tQ#GPV21n8nv;#kpPwmRR>_rwF*_gx~0YO?6YOzJbn*%i zH}3+{c^LX?l^Mf>RhvT7C7cfcT~WMk#=@q_ui?2ROogS1T50_Gi$iJOQ>(8=-2VQv zc9S|Ah#I9*_p;RE8Vm~-c=I={0g!bObJ#i*0tB=m^y*Z%$(j_97a;;~jxe=|c+xyB zClEhg%%=4ByDq210V7q$#<|YD(F%)OfpHh!*Zg+t^WL|l)ha8DH8pRJa+90bWd!&k zdDiA)vWDp;lWoK$GXWhhxp`Ach-yR~%%D!+!p(N96AqUnN=twx&v{O29{zd>%7%hT zUq9$tn)5>DWH)U+H|jD-VycSxMtmm4-J|B=J#n?2)?W@Qx6>===xA38Hu^dvTDIlI zjPYHI9iOI)LwfmZEc+y8+k*DNyo9B3YPGFm?Tp2S(R>>`_yj=?SWCXcV&{r1z4 zs!~@79&#UszIM2LnPa~5kFwC(BJ#1HUtNV=G6#M(w=3(DBh>P zak@L{ajb7Z!P^$y8+NSP0?__?Av5t{*k|ElT(pQTmhlg#HVh2~@!TWv)+$L3<|PjI zzteQQj*QfGV8WQy#+3B{2bZ^IB!Fl}3)Fq$~@qMJ)d(7qgGu(?US-Su6;(-y$C2H1FcZfsTIiQdPolrbCOd zH{K$k-1Bd~Tz3GVB3g&6aQhH<$zyI2ojbEmU?1BGnS@8q+%hlK62|f= z_$8J-7pXl_6OXjA#28h4Y*9UJilHI+QsI#6wi&+qr-*@>vfL&WbOd%N-5Aynh?H^v zV#g17qZjI|Vi=rtQKb6w628iE&ilJksPx_xk9Bg()u$QN?(_Mb3SX3nXtk8NhYxR{ z9WE5Y?1$KYTP=NpwSN<~}OBO^rvf zpp1V77o)2O>`ih|Liw5!n|nUThk1RtYoHf;koJHtf5hkY^A^BUCpoiYsA zLr3O&uE!Vm)LM5<%J_W<9Qh4N%6J|`iCtLHNWC;tWKvrRCL#A?&t=C<#wtci672Hh z|2!8dQgtI0^l@wGmtu(X#jI{<`PSZM)+M~c@4sDbO18AVlS~zt5P4Wm)yoR^AI!s* zJvju9O)|y0<`b)1)~`cmoQ0336vMGvyE49a3p^tpoDn!~a>@8QLRcg$M7qfW zeTg1v8{J3GncG!5wdwCuf>8+el*&t{2reC4;ZvjZbO~SkG`$3(mSUd?uc^ zrmO24N`jOLOKk3Erw^d!)qP2(UuLA2X;P97ZYtA*z$B0sw9O=d@Hi<-QYctX%I9_(+nV1@0_iNO^^9 z0KH$FEb?C(zmd@ z8>II6@xAE-+)AKlOu6hzmv@ZF%y-dzzB4M^cM>}QIB;bG=5fj(Z*M&vsT& zdUZij5H(bc1bn=)a9IQ&Q?!p&in?PS(qM3e)wdXaNWm~hh+RNZ zY2EoD?)45nlG^BbFTVsfk=CO^L$Rgeuz-6JE+NFL=In!`ltfSXSjJlp&I&@<*q#ze z46zw@%jQ(BOktx#nROnh1z7jn*^$O3jYwV(aYbJ84IJc8v=Ex$LoG~o<>pa&jIvA8 z0>ur&PDK|$FLR8e2_#mhAl#J=KO1IA|KUd)Y6-1u97Z+p+z&LEp06=-768LX9n8#O zEnthoD3bSm%Ts8t#|kjrab;)6m%R6=?W`4&Z*e2hpN0TsygZr3Jm*!ru4xH` 
zbBll})W&G9fyX~dVqRSn$ktSo9{w;pjDFobhplPyrui+BHO%oArQrKjn z1yr&TKvm^1N%9F#kSMZ8ejD&YX@71SB7D(TOo%*40`NVps?jns-QQIx@!74)tG*_F zO^p`)p%x+9g8_PIM0Pmo+vD<)nz}qXoFZ6O2J7#3<82s#IRa@1utvpMvka0>;VzbS z{^B>ehBd2)5-D96PIT`F_K};DgE{evniu0zgfg!FB%?m4yb0M0B&MV7BSM%(BBriH zv;~o~82bUbyXgt2k(ff|3Vp+hl}K?d-I&^CK)sV|bkHrIvSyhJl=2Mmc&zWnDOd#QiV~ zBYm=guhK)V&6665DsOd<`=krEb;xgI;d@Vodu3&tIRP3Bv4kDHz4S%qoa>~8+l-b3 zK8|e#Zt?;EJi_Fwjr11~8*(>UcMM{!e5mBeWOqTvn1JriRbU;{@0(xI2`5;ML-h2gLa<9gLYkcZmyd>7SX?(t%(QiiZHF$k}Ex8?aqXuMO!7rQABLoZ0sl6jouP# zj`yE-Pp|}s-x{i6e5L=S|Hw6|TkI_LQGNrL=_p1w)3wo%a{{ZoFEbX(`?}uehI7xs zW6%$}$^7Z}a37M|Xq|il)Z1chwFS)R#fh0yQ@I+t%lYe@H@0elXfett1(P+`Q1MI8 zlD3H~=?h7kW;2CGn=|X`E;mfZEA^g&sz9Ap`C9JSa9T$_f6>bNj9iZ0a!EzCZx7

I`KklOkma-^oPAzRhOI$ zNWJyzu~=SDy|=zRfG95Qcgf}Sx`C+tzDUUj8gtHJnc>utz$KV&?8Y;F+AxPX9 zNQ_5jLHtgiK9RJVXOC)_I9r_o_yahCRyh?!yw+pdU3oxA_EA>(3rxKfpKyo_kHpyQ zM*{;k$${$!xms^gl_C+^a3<`JyR1@gqtIRGj+7wui~77s#AulFEj;w%C8DDA@}pcR z+Qv2*78>pH)ZVoqacUrOB3c|_afoYQn2c<5H-d&4kF?HudpR;58o{;?w<{Bxpmz$W zAV9YH4j~)qD4jbGPd?3o52m3Ua&yq~N5ST#LjG(!%Z}+C4t9|a(@cG|O4un1C)QmG z0^*=7Q_~z&*<00(kFe;So&wTGa^k?ZDKwP+wT$ZVB!KY*5Rd?Kh(tg&i<>De7DWK6 zEUgoT;>n#6{br0fxvKz;j#VDcnt%Sfg9VfNBu|k30&z`@W8R2qy$9MXd%mfS?_u^l zB1o9rMNfcPD*v<4&>_)q3x%Pml9_e}5ZGlL4_ z&YOZ*SbQ;>?sy`;}-az zQA-arw~165o>3!QZ3R?Rw;@nq$=BhRC#WkALS1hv`J5 z$|3w}t-{p8B919T1@uiLDMm9$H*ea&Czb&|u^7@i7;tC9Ma|&S^G@;0!cCNh^3oSB zlLwdbpaD(9xueR-0pvLh#PCP(Xm)KvWX~FcIhZmcPi5dpH_zpFQUxFVAM&`gD-h zhF7GC3JGOs<=zVAOkP#x2VgAfVX?ug7 zCZle?Z}khERf&rbuQt~fE{)Y|^!n?Ir3i;SOI~MKDnKTK*#e>>KFK{4iBKBH_Aro> z_tO1(4K(H>tPxR5*^ya1Ezkb0nYt+X@$Lc1pT#spZIXdJe8078w1;pXvn2C;1eAq2UWRh zhBa0Sg5Xcqh~yB66(8zUJ^2#GlpjPtDG#!P=JhMw1s>*t0EOKogoSX=ri4>;0zXeg z03}LJF4$LYtD<-Cj9%59VQ47(5?gE)pAreW*Bm(k0*b@*3gxYVdfrh^E<3V}D{pk) zx-~9WXRpNips3}Vi^FN^mm+7)ltzU&K`bkG7n(1j&iG{7MJ2$yhw`_Delgf>wcdf-;bRmnaRvV&ROT)Ypp#q7}NKq@sF&3UsizRpMN$M z!@o1giwQLr)uyHPoT&~@s0v`-A;=dwY=~74YOsvA7O?Xq>y6k_2|dHX6r}2ACEkz>p!z*-f_9D!QXr@Fx-Eq zjWggahqW3h{_%EVcn5z?pRi$*tQ6*x6xUl%mVthP0%H~q$?Ll;;w4iHb%~SLX_;8- z92|~|xD>@OpXxYxh;CKB4q@H#k{&J=c7)#Sd>$Vr3XwH;ex@7io4f{{k%8lHz?AQ# ztlZw}Rf)ZmsubL&e*>-^=U$Fv_P-dVVi#tnq{Nqf8H_^V85}ic$|NXp2rH{!82U%v zD6~q!nw>;pI{BB;av^fya3hw0S71KnN-F;FEs;g#NkV37lQUUviLGxE+@n1lA37Ab zIzuG)+HxI1o0xc3k}^F8n&xSP;e}9rO7hVnIa~ZhYoU;Q8Qs7z?EEzKkA51sxLCax z9i(L7q0nwLHD*wfASJlR9@t}l#o!<|ocbn5TIM~7kYq>&s~|N^K3h(6P~YE|Sq`gL zPL~f(NJ?nL951We!d_iqX~a3s3+aH)j40?Vt9HI;MSS{=H$!;U-N|Y8oP3(OS>C z8$S>06Hh-1-nWhD>4Dn_qKxBrlpJprt{)C>J@en)mT67?lq#pjK74&!zuiIVk~+B@ zaECAd{TTHb{@^A3?tsL7`eXAt=B^S-QLZz=(gJnUx$V($*=p^SRfBxYy+EB*hmqvY zPE(4q=^ccfvcEetV@|SnQEre{+JHN&a~r*Xv}%+JE%@DCTh5ka;RbBJ!SB`R2MSA1H&3p3w!*sGQjv0`orgClipQWHvxw2t~zb9&{kCK@tY^ zZ~lD!ChJkVfJ!nLpK|M5RTAiNQua;XqFLQkR^IE`%_JyQeKmUc^7Z4DlgKzL0DFZb 
zxl&_N5P<9=BZm{*|5qY3q?(W~oW{TgccfR} zBDi8uIi(Gb#&Yng3y}Rn8_Pxox1kS~wNsnG>lrT^@WWECsXL?P`OmPEmQez?HjnJh za4J`uivHIOK_c;&7j3dG2Lfo8s7SIpbkc>N=uBf;(bGGRdAtEcE=6_n?+-udpi1 z69e}XZ~xPuPRBQfD``?WF>EnL%jxHmg+ICPBhp8`t^}=icjJ1x_attufEVnRDXJ)nT8A+LvwzybT z2U8=T93dHN#Shaj)?zq6kwSF9vNrh1U~`=GVaoCX{FiO;{bNua#!}oqV1;@izVuiB zz3LyNZM_4BE+wZ)T$RaSh8F61ZGkk;{$N#I6?=3mi)+WtQsn}gBx)PdS{AKU6-yj{ zpf)K)m}GCn2KMzge?$;GHYv=h9L?9=u|E1-sy_dH4R`Oxqviqvt>iU`2 z&seivey!{_%LP``^^P1Z`IiFjrm@&lk^WW{Lz7@MrSbF)oABQEwY7VNcczvjvnz>h zozIdB;l?ohaz3R$;x}t5^eJilbZe1Pnks^C8`mVy?sdJ7I<9`})YSfnvhYp+s2S~B zdZF@lRziAQSBQmc;Hm~~_(aFN^RC0$*&^1B7`Wc!s?%93^;KiaoH%+%^3{Z+OR z=+<1+1$5;O`3TGp5{G(2C*Q%Q3iogDJ%sK!nojna_TzPeJXIjV9nv@trm)?giN6(H z164kBTzxOfKYcH22+2Fi)~wT^*8Kg`8X_`GtkYj|M>2AKC{MTcQzGgJn6SFB7yb6b z)=kXx^{N9^?j`8PZO%jD`iC;nv0$YdeilsG;r8qs+MM)nE)7M4{lpLjnT|xBunCEM zJ~r{fG7~*?(n^T4AFqxI{ngjtLxRDfupqH!cwp`G8?uyFPmKBW73W_v>F#uPVnAxd z+B?{HT@>0MVpBfb;`{TtzThwF#n)+rt1`>aUsfQ%g5?x!G4#>f$>3I&V2gZt^UU?R zVh`yMJP%r2iU@QRN$er>gg9+?*yt}@wb)Oykvwr1b9~5Piuev3wTwdI^l%Thu{{eW zX|#!=f{Iu6kwD3zZSWJ80}SZ5z1BTOdXheZ;6)FhDGCunXd-B=xuvP*&EK@BTaLcn zdoJ^mE@~P9#2vv+hz`U}03v`BYMgS?`#2-b&UAZ2f(c2aqjY z&Z#A=BVkY`qCT=kn2ALZAnP6#d+r~Kc|sfOjDAw%TOA1B{_vW*1T?U z9htF7AdS~ABQPV*50&=qFr9CIE=$Cb^iCTgNjLN8pjuAZdA(n0xmdy9D z7*CVtNjFSZBlROeRU14XXU2S@NDf&Zx}9Ep;_+vo|7sN>zXF#Jc}`3eD5!SgGCXjq zcHS@g)cmr&#)d1SS*N7$)AADbNIUxk;Tjy~A0H z{)cvWYbwpqLq1uVa-n(oVI5we3pB7*ABgT|!0b;G0kGvxEqrV6tBBFqAv4(v%ZT#* zpX5Ennb(x#jmRg}>rY6R8nH~}ZERy>?=fg%T-ad_=y2ueSO;Cc)a{dOQB9EjbNaC| z*|j6?*GI*O3h#K6mB`J#$xycoO6eotx_F5%ILa}rTK!Y)@EG;xN++S|iPFUhvn+3; zqP>X`-*;=31^GFeV!eaZGCi0;dcOYH?UW=a8}2C^U~7J9Xy)&al7A^~4<6>z?!Km2 zTJgAK5a}B9QbH_S2zmbhVUgCk1*ee&c$?$4y2|`F5e{)HSxq3{%+)=Kk@Q6tWpP;I7i= zD^be=AJDBMOA!$1l^wu-#@9!+_j%ytyDW}k1hFZrxc^7SDP+Ks&$S;@Sg88I{iS8E z-V(SyYIX_D7+rxff-ElpgXP<%goAp3|5WA7C>XK%&zto<;El^+TW?a9(_BRv9I;H%e0bUX>31` z#P3I$&HL!NAe$tA8YukHiwimN34-faZ=CK~E*VC@tAAq|lVl{3@VYjAMg|eKiB7BY z8L2hs#Ck@GUlE11Y*9fvEbJ_wWZG!w1Z;u^5$!B0zGjhYlH}7G!*bOci=?!JN5mw0 
z6E(TR-%A`yvJ<;Ewb4XC4XFzC00_eYVikmtcd#>HYn7wEQF1q}Pj(XvcX6QY#U`Rw z)Tz5x^dlPIZ!CRU4&KnJ9)kI<{>Y^MGP2!!U_Ij(PLuIiz6SPf4MPdU=DjU>u4yFs zc|v>c^B8|xX)3YJ#GwC|$&}S7udcYlz zEG`tWLFUVz#z7&tibHo-@OdKLcC!^LUeFaz@+F%zk%W2wlTI79)wf=@w;OtGmn;sa zPme9voisyF4J&-dy(8VpSTF70uL9`Y{=UK!_x!=}1eJU@E>-4}MiTJcXG|^db#Ons zOz7gT!9^LC@p#6HD7C;|ecHV{sCRr;JIz6+a&^LTw_g2VkE8k-vK z#gzi!F_a$3TSkPhS%GO>Ia)mg2=I4#*lA_E@e2f!pp$~Biq__w zf7_@aQLun_G$b8&uON%1>Y)*T&c~g==wDRPPzCqGws*b&dv!K;7D!EUZMC(e8%3{| zv~Aea?CaFj__p&7tylzwvp~?=D+gCS5~L8h6cT1bXO8Nlbind8-G~ zsZ!voj}L!okm@&vI{VMos7_wqJ#?Bjx>o!fQSOm~GBtTf9M?$YY~(TaT6T=qrtx7A z?j+VXjDPHp@?3zND;d7jL{2aoeh<(;Z@tD!X8K+w%-!iBUda;=LP$A1i*(pHNOdR( zyQmwS);n4+rg_RK84Rpl5F>s7n8n1Vk{N5~W8;yG)x1-BO5hyK`#{efT$yw*j5Zm2 zM%ho0FB|$wxAdbwIy!2A_7LfyM$0U5(!F17`3h{d?(F4ffcRf#L;z&`2=p~en#!Oh zxFG+Q?YZ-~tps%ZC{uOw4rq2qg}+uNpRQ)nz~ewld%EQ!Lj`M9Oo7$qDCbpe&7>wP zzZpU^I)1Rn2tMTsHcWh6@;7YpRN?!LqyL_Ncg3=TlvQtU>M_Br=*wUY%qoVV51$mb z!CB@;PkBmly;1{Xm6FycruLMoZvgF$cR@??#)|CCJ%DO*{iE@eC%}M45 zT2**S4a9N|v;JI?7_+@toR19#R;qodhm7mj9`y=ByJ<~-(N(! z1%r}DE}$eFdaXj0CblS#cyL1C;n1A|VY(Ji)HO|~T>N9(ezICComRg7t4Ls!b;BR! 
zOormVSRpJ6Oc<>Kpm8WhsfV6RJDH)O`D;Rrh9|)iLH8-0P-s7bvdbT6U{eVvGDe;J zsUry8yuP?b5)0NMjW3vz`h^))7xq#|FX?N0;!S0sm=@x43Qa?<)Puj+Z z&#aaR8}enHS?RHa0#fd#J9`FQb&yJd{_Ftd41jGSJJ-p#X;vGPF9c#DiyfnUWDS+U z*Cj>C4*5+B)&zWccuNSi=3f1Lr6-&G2$~4?_e{14;+)gL=G9zWNp1%JTV^|I3(Od; zRi|*Z?@xI2xqu3$P4fgV3h4+9DWIw4ZNU<^i{%nA(bND*LTHohYV3xj1$eeUOP0m& z0&BAxShzV3fC6FH1A2ce+kZiXPC#46rmPt-7J)VN!SzrSqd6G7w6cIgFh zp>FX+cEQRb6I#{Vjp2kgV4)uSYski$-Sf2|5j2(hKc5>44HAH9twC(`z$OG%9xI-| zh?0O(D4cvUf8vp!4CP@?QTdKLQuIbMFOk)jEA`XM%DBErIdUkuQXZKSd7RON??@25 z9)~@_%^-r!lr^D{pA?P$g9_&JSZ_T9UBk*2ZX!rdAFCrb?`(G3R_w`D2SJLbOhpR& z+{Mqz=P=BQ0?6a?H ztAw~r+!+8Ib~deK7uHc_eIP*QA;I^CcLD~4#GpbQ27oZosV&}h{9N~=_(c;IgS?8k zt))yRD*rP%rkjn4x-uh}SjlJaLQm5B{$o10po+AHzT8G6e;|5*&1OiW1OzFxv(=0_ znAsbB{G4{%d_qVyoP?}Nng{3P+{-H?Xqso%@A;F2>udl)A zj{ard&x4*(Q9BlzGJu_}Vog_I8ZGe|`%^lm&IJW2QS1mQXzAYEP;|9`#`kAmpa=7g z*2&`-FJR^46Zo%4059@z{OKcx3z_ytz%qH^>JHRDTbPY@?v*C|do=FOt-EvDv6e=@d9gT9IcRhvB}$|G^%OYt^- zyR|8#{kK{T_3MK-bTnuJ+?TGfOaW)a{GHN~%jt?Ng68JO&6^npV1MLvZ;F@$-rS40 zkcp0;8iX(durnb6C$V`Kpi-c*#iD`_i1iN^f#!|*58LQ_vlEg617;<4*R8~lS`H@1 zm|j@y9AZItEAqrAm!ji>pAU>QzCd(X=_w>IEq)La5t=e!>BNrn9PRXmo&KA4`a7Sv zgklqoAyv1l&3e#Ig*35Bg-^lhP{;|I=Zh5A1^wq9&uZ7_SMEVXn99CH(SwF)@RE0H zyx>j+gDgk-+JyUavy=!Ky2yA1U&(@{>N(LzF-Drs!_FP~(|?ta&lL(4Tl2>qt;STE zeCEBs-jeHh4L$ds9+<3q_s5=%DEA8s;yJyAa~)x?>>2tq!UY2xThm^B=l#c#^#xPb z0rk5wPvJ?gF6;sI8!a=^uTQbq`@~!^2l8~aKNl9iDh}4Y(;YFe|4@YXU78oQ*k!*G z|M{;>@IdGl)<7uD5ZRp=oaNK|K(Wh0+kC4D?fE4X88}H~ksbl$7K{%Luqvn;l@qTN zZKiq8P(pJcj>L8%kje$+cYJyNi#el8SlDG`(w{2#h$^Z3`9DIm3fh0-PSt0qaWGid~j&tUq0i>H5U{P4MIRCuDD(=E@c zxg4;u*xCPH(aEE)&vz%yscfC;4_z=EO5JYmOT|Pp%L(dBo$mdSs3tgl*%8y?oHW`l3HsFPTDYD7)zHEY{~%jAua|S zN6h~c=Ta{uz@qU}(lO`Xvpl9cR$^h&hj#GB{$qTRd*%1bzyAcoRwn@+R?A=;R%SMn zKZI8f)ld3>L(id)eqtjwbDyETdo%l(j>OEjbOj?Voxpkiq`DUNL=46FOxi#c1k@@5 z9rN~fw(q&A^L;|uL<(h}%jeR}tPgCD#w?M%6S#J;0==5|O^ugEbhzq?csvZgecEKo z;218^A;HeYj$0eQ@t(zY-Ak@F&dxa3>C}Rms_~&7F(iA|F^RgmYNqtF@@Sh8RwecH zV6jU^o38UIg+BUWQTg`U_4fz>t_nJk8!wIz2v}17 
ze~iO>{XKaQKnRp#NoOk$Xwk>D@U^H3y=)!+7Ab}hYW%FbVw{~gP21#tSx8&9r%(Cb zcB0(8J{_20FZH6R@44!h!PE1?`y#iZ)>~HZ{EE*vuDkPghckX`y8APVy44+pJjbr( zd^Bd_Po@0w1khka)$L^7@+v75$t-2o_xWV_hywG~y=&Zk$9eV51FKp7T8VENiy*b8 za32e|B$LJNEgr_aqVet>gTBvNspvX;Ze}Dn}o0XBE}W)W0$YE;3Fu=^2!&Q zaHW%d<@Q7?^1X8ni}UIzRwd?a-OMHNi3RmTIRqL2h{Ej3{yb<$pldS9VNTNOzgMQA z=VFh?H$+3+H zlx4LIJ&}F*LezA+K^M`IBIn`M4YWi_kuYypEz?F=ntQNzp*`g!x{-RLhoedzNQRUP zfY}KCV&9l{&Q&OnpEMBUm;AaBW`Io)Y*Pz=hDp$v+>PwT=V^mC9)qS(Tz(@!`1E6A z2$zjA=2;`G5e%^`={GswHujoo<5C~ad28%|ApIpZuu*^e({Czr^XUfi`{}xadHS%H z?Mcx%W%fa(Hqx(8Yd7KQ*smR4@ig*PPwty@#BK)oibw3~r0ZzoBI{->-r|Qp3UU4} zJK?3s{ekf4cz#_l3K#HQj9lf|!w><5PpejOj30`kN*(E5DV|!JEnD5m$p31cbk{qZ zk@A@Q?U-#gUlR%D`#C$~kn~7Do$HsvkQjb>L!`@8c{HBYl4F;w=#c zeMyqT&KOl`F2nR}xSymJI#v1Ek~hHQI@&3cIS+!tW~fq6cM%tabjxljCzDbm*CKt} zn-ZSjFkDpbk(ztW>q(B|EhZiAUwaRfbqKZ5=gMwfE_Ko9*BdcS5s1LTZ5SsfAf+yG z24Hi;jH!`*fy7)VsBwf2B`28nbxg9$8xAy9cJbHdFydt;Z~1ViK^=h!-0b)}k49gq zF)CP>F;>~r&&8^b8yW2~S^9nA)Nk`wIQEUmXX(w#9l#Oc8Wyuv2a|q;E?vcwAj=Qs z%IZG&&}9mf_IC|Gs5Uz`-$Ac;7Fc|t7$I6?JZP3UrPuBkT~ zKXF)*L*%I8p2LX8&gXUP34#na(gqURFF@7hPh?=jt%EAOh#yjIE)V39ua?4|#gp%` zY1oM9RQT-ey(+bod!c{*TOL7X$hMreplMuw;nBveBp-j8TZu0rDRaEi#ZbJtJCpf1 zMUR0{(tIpj#upjwiddp?I8do{M%<)Ew=apfH^z>0!~ zS1hQPc7+u;#odjUjX3OAN44vZrmR;uR#I#~g)9`B#(w6TL=E&m+(EtKmzm0kA3Ub) ziT<7UF2v%L&M*NmD{7jE{w^h&hgB0L9M3igwu4nANemkfZLg!X&liOy=MaE)0-yGPRW%Mx=6kULeEjCwn!o30kq zVJqQ77*&c#yurG>rP2~kivM$OwdCqWqiyL{X74U#0Jvu_NPHb{))V1SK_sFG_-u4y z-HZ(Qw%PO;uX}=NQRhIlgvo>vb){1{m>Lj^hK^6I`EzE=mbbJG*V}1GCWd*rsS*hCpb9J6wm z(v&}j@B3l=U3|hGZSV{1|J`XZ((r^O@?0`<_1zSXeeg>!oGOKjAgHUKby}|ELu1#E z-?{R+LklZUET~E8=Gz+ds{~_54oV3tZ>afwKPd9J$+YxzL-RTEsN(uF)BcNIgnk{} zoJLr3U>mp3ILY$Ho3k$sXbsHUHAHFuEHN=+afEMwb9o$58~1O#l2K2R@C^%GmeG)8 zlv@j`VHw_d!~4h)xp`u~|94Scag5RL(thXPXK?)^_hmQ3Ywm@7!lMQ@ za?P+GjI5y!iRNs?ugU!}y)V?dL1;Y^+iU)w;~IQGH2qz%C(DYsu4HqfJ7!`l7MBH_ z{L8)iqRj82#w8i*dtK0oCbDY#zZ zMNxQsUK*CU#!YGo74Pp{SFk{9NTiZ(kYU$uoWLGibFIK|5t>4)01Tzp2#o;;O_k}o+^N3gu-s!2L5O=oc~IzMgci>M|~6-Cp# 
znYyz_E0YPD4#>yWiQaB8M;2tH{%V6SYR-E` zrh3Q!_#CJEbEG}u)aCk(m)N(%!JLticUUv-3y+d1E1Y zO3kkv3!izl-Re1m1_&t_Ml$Nx3RynoHJRX|V~Omd2yW)mUVP~``28{H1cNUA9?*K` z^@+CS*R*uur`V$;WAhMDd!Q*R!aAF#^B0^zMzDMW_x@GPIvl5R$yyK+k7?1N6GMl^ zou6VHjSup#_p7k2?@jWM1>1cO*S=kteo|6;WmFjF)nWGc*zB~QZ{SD7Q^|*kha=D-wCY1U|Z9BSN z)nJP0wf=Q8dmXw(M`Xql<>zmk|02+DcyDajd^VP)hNB1ZFx+xYGGZ&Ui4#*o`v4A? zf6d&f_(@YnN^wVQbV=<6P2+f8e-yI&C~44u4b^Tt29cq@`3btvpv392L)EqGnT|C` z;j4c5A9)ZqUX`z9Re{K-F`n3wHj8HC4nb8t8A^Yt!JtuZdrm_Cs)DtuBGI_oW#zuo zvBf^th3?(IKbe!KH)e6@_yHCjVO|(-eJ@hXN$F~p*W}w5Ao=-E^}*Os6h9ypj2-u? zruqpYv>yC&|Z@# zU4ahx*~(!YZP%aPYT1yeis|fbxsSUoi;nqtN$`anut{0hq7gU;RE4wvYFV*=;X7M& zQCrGQ-4#7BTm+bMMog(z{kTsJV!AtOek8nW>`IyBgBTQ(Yi@InFS{8#NNcG!XT~>m z4s?9)S1t+}XgoUiRU-KC6^@8sxp%S&Al%+&1OuZk4APUr{@RvBp5OUNOY6vtr!uih1A4sTbe9F~a{@0-YP> z7X{;v=8K|&TkDt2Z-jc2L7$%xJzx8Wd*Z%Bj}gE!-0k#6pO;eszP1~CP}%-D`$LP7 zH*#T>AJ62Lc3Y+kxfrx>+VB2KO$BK3Pbuh*HlSg-<>6apNUBna7cw${L!yL8m|2=s zkr;WFI3H?im+8~xkr=bKBZJ0GOaCyhVdIqAQGaVZquKbHq#(W&UW342*w3yl=^SyHR9mEhTkW zam=4dmzh52%T|28Jz}Iw8G&{K@#JG(lOyzJ>iEH~59@AZR?Bu#A$Rv$NjMjMI5Bfd zO(I?cK~ zNO>tN>jO5emh6;W^Bmt=9uVnfhjKSfQ6OPlZnq!CF!^mlo2V)l_W2oW9>bVV__%Q?*G#7S$zk zj3IwUEfwncnXJdqa2{Ig!4fga)N(S()yOf00&XpP2Wtzj8Y=CIg61o^W`8d;d8leS zlCWy-XQGpeis{n)Dh@4LUoZ2+QReMXd~(dAV;FBu$VAvV5h<`!TF|H zN-m#Bzdj_G^Zc0hL;3`+VBagPzVgE%y6GRl8)GC*0WtPy_hDFwJuo;Fh0j~q{JsL+ ze5V;&%O~4CD$Kd1;T9faPa|+Ty#m!=F8_$!JhJ89#uuy{E3YyWDm#q*XVSd_M*~>+ zasis~67uY?EBq$mx=Omsa8)2YaGHGVhYCrJlJk;wGZTruTj$n$U}(QHDm;PyHoSe! z>##|Gp2BeCRSEb%=2|!cD1W!|0rkEJU}$=iM*A>CH_Ght?|-U(wv!b9{j`N3r_aOH zzf%tzHulLX->ro$JJK8+zY-BP&ODma(?62MzGPsffQ$YafR=G4hKBL*b@o8ZN-=`Nd*%S!=&Y}E>#FY8t)Qxe(wCQr zJ_=DYpR)-FfqQw45-NnDdB=QTU&|i!~Li@87w!MU@Ht6|4*4 z%z7(UNz@pZU48z<#(&4PSh4f??J}P6^}fZvj>bk%KAa3U@g!1VkiJEeCma;%KlpXy zWeI3gS?*8+hZDSqbc9BxG)t|16j#e&9^*ub`rOyAc~?g}Kc`ioM1M{SCMREqKI1?c z1DG*?7^7ZF?eE(rIBM#XU(%3A2Fz`INpF>Wiw#2Apk9PUQ1ae9CjJLv4O6hf9n14q z)0f}bQwNeMAN7di(OC~N@4mz9RHkFDD7Zc;E%~GC`*UiTQ$E9Oa?9yQD6z;H#kJT! 
zq1Od!)_8^Vj&wgrAt_hmmheeQk&+KEDCyCySYDEDqW*c0{Ku!B# za^|iSsAVJx*VfYQYZ;_8Zvz|6g&dCTVj7Exb&$D$OqECmf@wnSy+AxLZy@GYjzrdB z+G=_;jldKR16pf_)Zq)OQhA*-r3ej2%Ebzikg>>5rOq&A zlqn-=@)W=7C4f;@x;%xvfv61E(uRwgohc^hxd)mTDP$LosNV1UUs62r+kPN&0Hvql z_`8%*YsTp&A2w<7mw60gO}=^H)pkbH?3mZMp~#nwV9_1t9MpZQ!JyajTYfmF4Q>KB z6Dxvm2_6>ir$DGE`yQ}^^w&OYl^H|1Gz{|iI9UI?f)ahEly77tL(x`!EG zKK(3DB>0)(TB6S&4;o~w`W8@r^v_tpc?hS+rT3?Q^2c~DVfsi6Pog>a`Y*u1Pl4|( z-T)o(a1hqq8YaMwP>jpmU6^H>QboK!BaOM=ETdesajA%SUM*tF|qz#J(_3-4AW3 z6X!v%6lS3QjVz_%Fc=MEQeXZ1%Syhk$Rrid_-BCVK@$j;u6r!M86hkU^#GQ&)A?Ee zCxlz2es^zTUllB`8ZVZbVoGgS!zqXml_?c9L?a4pGW2_ag|$OvkQp+Mh4Ne~1w4iE zXlH$$IT*k&`+U)^;-pU45YcN!&qqJCO3tm+t!MQ1%CW8~&hp+R-Chw@@5#j>AI5uR z^%5|db5-gvU&ERRnf)XfCK+>u6Ro~S| zZ=tep5-4Xlea}XnDo^xzUzjN(&Qg+4UZ+Bji=b{0*3NLv>JSL4$5I;&r8_Mll=^E? zWQg{s|Hu&SPZ(A3lf9>?*p<0l3y{3>qSm=*uYoFL3HnvG-@`=WbhgIgdXB4NQ>^25 zuJdSy{dY)bjt5%34>kd8D}y@#q)WQfaUB(JnSjmtMLd!5fa6FWKX|d0M;kbbYzATB zj`06{YplR0k8rGr>p_uh%7y=PKg^CBsW={WvkiyC#963nNm8B;x|&rzne!Zd}-;0@9K0L6N!ZHwL2XW zA9u259IS<9QAdVpPAWaAZ;wgajIU?%n_908HF?~XnF~#d^IgY#^^?SvnK7`JqiO^M zO$3CMf3$u<^X-#`o3DdXL0zS^7{NG@lk_#eV}6PD-he?gPxDSYirN1 z9!?0bdc79KjqP^giBM(a=>tp_n|&!sc?RR_M3-&Ax7Eo}h`yr0+6i-}qta}|C(sre z1?~80sgKg~H&gx--02y(7su*#>Bg(t3Zw|{Z~YzI?9x57wcDvtP{;PPS2`5QOEWh7 zcx3n)IPRw);`}x?Z3Xr(Yb0Af3+>@%=d*jbAm>}fc+pos?kBTFe-`Zyyjyh*Js57f z{v*FGJBNiwEj*Y?A{V>eEYz<~iiuk}8EX2T(%jDO*H2IK8$Jhg`T3vobpYT>WAU823a2E z+nlb@4_J4lCERnB^`bwK;qyIVow(?o5|_s(I~a^YIR`&wzArUEEp(w zja@D_y1i`l#8Umn2(;?vf=dlPfOM}VGZ4we0Y>*yh}B%=POr9Z7R!CdL-jYeTSdsi?M7tgr@lESBlwp?GVxal>?dPqlj_t!=mW6KqPN(zk&K%!UUlJR4mQrZvbma_JJbqv@?psKxf&zs)zipusIQix3L;4)*3MLo zuZd1R%2gmJlj#z`xw^&C9;t$f?2?M0Gq073Eai!~6~OZn3h|#>#=9he(f$g?gUS}r3!kBtw$0e*DYNSd%?*qjkOkTsu7#7l z`H*w{n@NwH>2@Q5<;K(qRk_Y#4HwqSIRAW##hNu!cgth?VM&n(WQCP%UJN${@qp|( z=V&HFNEkm+k&#lrXaS8Xo_w_?0-@{e2GuREHQBbJ9C;{*?_utx&b(coJ92ZIXzMA@ zfXRpSwH&vNk7U6 zMJa-+fs{y>-@YmyrbT{|cX^uF|9Q@vs2N^oqfc7#v6=*}k2PC!Zr3lHG1(yNR9>Qv 
zz~06_AK&WVKA+xn)nl*Cl!zvXCT(ItASg$5nxare!zy7_i>=WkUY1}H;m|-dyLn=e zMfabbhNWTU8x3YTy05jj`E52Z%{Qnh%NGMYm!J2C?W2)ydZDbFSDdRPoU4=+@{ZR3 z^h>6T7p&-~PDrc3tXBVSI~>(Rvw3bVl`XY3tFwAHqpJ5I>&q?b;xKea(uS-xC3IF_ z&0aMHxrem~c}hSXrRhyuzszAdYM^qsAS9{-0D+GeSPHn1In@>ax$L7-)#1~Cew zfzQN*Z?G4}%#_+bM!z0Hbqmn|?|O8-6&w!o92EAZ!ok`mgXKWpu95EX1=P$=GfmB^ z0zf~=KVKF%{N`9T8;ip}wDJY+>1t`PZ~kAP8;HOBg~RwJ)gD`Fr2_q5RXLDF3t=0K2U#rQZkThirqF ziEA>c&xCJcmDXHI2+r77?QT>d9i)AyLN26zS6k-|3O8GnQC$DJzWT?jK6ddFOS`<|?Tjfw$gQ@R|kxT1{rszZ8$C z(^C|tcgrw>qUS>}%2Z-QlNV|gQyn|K>F8mG?Q5eB{Q)yY1@PxEE9x*R`f}cb@v2Ec z;Moc^hZ2YMZO%pZ5@vn?9dSOhMQf#U_QzxU9kauVw(pgf^<-8m=^e!i-fefv!Qlb5 zY0P}wZ*c(_mgdR)ubwsI^WQ>KX=U>$pHKUalhW;Q^25U0I~Pc~EL)yEpv1Xnwxd3J zeWmE`y!_Dv?lYfpDEYPX=bqM(9pUi~=$4wj0na8JINvFg1{0K%+G>4UvE?7~6msqe z6!DQZNMSJoa&KUD-v|4zW8wZ_<~L9F{S~kt@^1>IM*by_A?L{>OBW0{^t$>^DTQ)i zU5ctU?#!iN6@kflYR98!1ntMueSkV`JF#3mF>@nGtR4YV>&)N13nv1@>8D}G?>%7M zeF00gDbQpAww?OcQLk)k36xX&C_;UN_TYPNEF~8y^++XQq>aA6Ol=p$D&2hn(v5to z$0UG~b4^QOMkYze3MIk}mUrjNO~z{Ppd*BtQ8&z_Sdp}IZ^`SvJ4sYO8A#_d>$`Dr z8E`kmq(J#Hl&(j@scr;>5&=nM2DB9E!|8JGgXBY%Dqj&7VxxX#NWG|kZI&7f_q(6K z`?uCGW3kM`+8#8hw)HW{89)~w&s?PW4ZU?y`zkfQ&q-O|8l3R{`@8^xk``sjoYsGy zW^rI2Y!)Da>La-sh>#0#w8m1ww@w6_AwRw9IA2aVEy9%PkfW0Az-(>~>Sz6p>KtV5 zxFae7;r9rHfYO{|^zf7UaH(y%q*kQtsgNk2mpQ6J#}@)cpnGxi43Ljr9fyM%^tDkn ztJd^0+j*jn?m>vw^B?g3{F0Pdg1OoJMF~M)%Sy*VJPyLb!le~Tt zcKRvsM)h|Mk%9dwyB#fc?eMi5Dl==7q}qWE{kj5A66p@y{p|kC`JpFH)~;vspbVxM z-^LJCG(D%8xw@LXwSHh)xa+KswVDHL3APX8jJ|x2pT)2jx#H2ZHag3JMf`@L$ath` za}RgK5YXd74e)aOCcZRm2n(ZneFd_CnX6vUOZ$N?rbo-2e!-!B28PSBLe=d|6qW#Ofi;@ zKqrrEh;lmgj74_r-V6J{_;-KOOhLiUSzvjCk#rbOu^SsLFAk*!{M~rsPryrt(@mi^ zrsLpS2nXp)`v;`dU=}|28a(f$G4(*Os6V4XmpTHMALJgX0(rpU@)PyY-~*{vp6(LmmB9W?j3X%g;g%ig2@<~YvY(G6iWCA%)h z^64Wg9o^m99!4m+H>v?NK*3r!U54wjuZrqF+x_Sf4{GfkzGoYJGhh6RJ+s5efoNUnJtsXq<*!OcRg z_13`bMj6HayBl_CI7V{fMn>K${-m+%O0pMI(?xBjL>f$6O2Q7A%=JqvRXt@EzzY5o zbo8EDhct!D40j|M>d&S#sbB5mZeW2by*W);i~S$0l~IG&tdUHX86`+D7lpTcL9WZ= 
zbJ~uE(xX^@hqB;}lMfLK*mPt?#$|k|?}apmM9{Rz{OKfn7fy3z;&1wV%drY>f30j+gTfnWgpe{pgh_tZE$Y1yPquK8NUi-|zeW`(4+$ z&JKG&!+O@b*L|;b;?fiT@vO%i1g1v@4V<3(w!LFbvS@1U&0^f&GkUW;@^TpNV9xb{giqZ{$nyy{qS4yIJ|-9 zTh5c1#hJaEmywS9SrR;U(n{&BgtffHw6Ge!E5cdWxNc4)7+h=k@nqR;fzeg??KB0g!uLc~Ny-(0s?-i;%EWsEV-$XimG5Gj z4KxTm2tVsv>HQF1Hl}sGnpX1@L2Eqpd7`~>vVsMttD+|K&{T%g#m?Y9g6)!0MD^=m zv(@Aca$Mvyd@Jo4`E{Q0%4UJZ>2EU*P*;9t{a8kM)XeeynGK1UcnP8PfZonSh7BCH z=7eOvhdPHg9>pFpC{0UxN>v$;9`6p_tUFhb=Ob^V1*~ADa!j{0l7((f3hC`tnDWRa zvoDaJ-K%^>HgXeJ0I^_T`N3PdQ=0Rb&O`Uf?C3`8K(*yU$$P{yR%fn@8g1QBpAe2& z6W#1ROZ!#5A$N%0YWpL)SFvQ zOE3R?7rx{vQ`bK5Acf7(<+$%46c(1rot)!MFMq@_(C8;Hcv z)y?Q`BBGPl{9QfNOZ|tl&mLU39dWFh*$nf#FCNlnljbx*a z@+1$)k}&e&o=yoJA?0YHM@I^$aD!7mVc>Vp*hzo&p&spFp)4a1yK{GCZ>=_)2ibk+ z@auXMYRH%<9l0z=$Ypd%n6luj$D@MhKU2Bcz?UfI7DXEFmM9j@R$Jy7?zj)z6u5&2R|=6`aH_1-z6rZbkvBsqBKIKZ0e3f@O}aNvNba zr{w>*b1)#@wntNze6yZ(e5$>KRBWpGdv^8`KgUJ3fhSSpXT?X3YCS(%D1TGx`D?3f zH0p=FD*7|DHFp(CfeRy*NwZybK$mRw3*}P`OY#t3AlLo^ZhP>M7rKow6 z(|qsAM;D)(IeH0o)aKO?&f4pqIQ5)WB~R-4nZs3|kaZIhI?GQIgV{CfL`>_Xw%-iN zaV6;o3i4AEngj}DF%Fob6AUj5Fn%3e=@WixJ=(l)mH`&Ys1x3`wI z=g|)p&-?twX<3lpel+F_{v=>-u-Q}YL)!1|zFcS~xs=jZCEa}2_wM?1hxqG6 z&f*e$Ns$8Ok`D@L%S01)-W@$(T1fOg_^A_jh^ zoqfuW#EVs)S|;TvcPh523c68H9!tJuuk(5+jB;-C!gzZn>!yU_a%u|oN4K2f&$W?T zD)!{8X7A5%iP-`Gdx}g@Su^MVa!EXtDQ~W!Q zWU_>dg>jO(-iiSEH2!#u+%v+4S-{n2wqi$@z^6sHM8F_u2NJ*Cp>mSz1NhU6#s+OzARq z=%Xkli|T8F-snD5ks_(Lgm_`=p-zFy=Uf&=ougeZ7^`juvIKTL`VZsr^``lU#fel5 zJlJw`y>(>KX0*j1`eFJ5^2r}BTTxVrw-wdvb`Pvd5Xd!!+;$T4}l6Wxo7iwezXpqKg5lqg~;rZ>-xJ zOXB$1i|O0ueOTf+pAhfT_|PKG&1B+bY{?gR=Ypgc{Gh*xReC7OW0jJr zqp09rPIjgTLqCcJm(EUz5)ta_NaB>D&=MJWeec~~ENbA*h-z($Pug?i$d7LtV(A;g!fgII#=Z)9p@K@%T&gZi)eZZ8<(xbivoEYqdZaxG#-4X znss+I6nHsb&GzDAZLaV95#a{bNb4h~4fPdaMqAXT4gIN#m6zvOK2)sms^=-)JjuKv zro|F>T&$T+(^Z)rKiVZNr6i54p^_V~_(ec*=Y{b1XWUDJ&#;iZBsZZQU?nZm4_{+# z;qGC+LPZyf4_-Z+`r(?wz=o^P1}%A8>5(?-8h z-j7YQ51P5Ar(@OTdYDJ@rJD_X!yAXZDx!RcjUA=4i&K@S1H__CK4#99QPTH0pJp1l z^E$0N@E)(8Sz)?xX&r0O`U!e8!@PFQt6<8KvdUx*R{Jsr#woJw8BWb7AJ0g&ETtZ# 
zE{tusB6{M9e6$`5iE;nsy5v?*>Z#t%P}V+rbct|^r33Oy+mvs){fiq9t&bFyJhT*d zv}j&J+f96#3N8@65!P2F&^xiZHEDC~TTN+pq@5OT8iIQ;bS9NJ!^;ieYE8j*(e0Je zzEE!Duw3Ff^hTtFxy4dvOAGoyK$89uwWPcz?}ztg<{Rj8oZoh_6LxyB3uQd2W%k3o zSgr?4OR(QKt%A_joabO|JVWS<@c*QFv_avSe-h&xW`AUzi+-Y#B;`3xN<6chAHq_Y z{2e9N*_!cuO45=vf9a`EX5VKxvlZhlbIgp6gC#JDiRV#Lm_IvRDE3v0$KkL1>hc@* z4{Kvatdk_Egw0g(5_umC9Ik96dd(@6 zL7SXjND?EC=ocC2lz95vmD8Fgi_I@(H~ZXOpK%`rp!2)IVRJJgc7rE= zOmV}{hjmM4*rCpP^VGxWn(>?HwHstBQwi-`w$%?tm8!3pZrMsC`LxQ7JBVBBd_<&x ze_hlKj;8mI{z_j|D$q*IutCC^YR@5+yX+#RFnRYq&9);t{mRtx`jBmv$4np*fSe0N z;fH$IzE0aU=8KMrT5Y=P>3DpVU$=Wy;HQt?6ctS=$Q}H2*F}kc&~xja)I3csInSd5 zp9eQ9SPrUrG<8mHKKW(MM)D;vCS+flJ^nXaV{~T2XE8di`Yhdm?fSqUWgBOS0YP1>|Fg_ zoFYbi`cmTRckz%>i$vj!vXd`=H}@xeQbdk>$Y+#>nXmcwGxCuHZBG;#SSwFl2{h9? z_HxNy``O|3LbCNCH)27C4Q7^+ZRbr+iVv3D;RmK(ES)A7C$&*hC6{=yq@9j95L(-a zV_SNfV(4QX+Lwr8Xz~8^)_vauHhzB=N-MIhC(DhWw2xC%+*)@MfBE%KjpHApeL>fwE2B9g6yMD(TgpL5@^`-GRz|uLBL1yN4gYXSIWk$a5 zt6M}M9N4GK6bs6HPwS}--^Q&&i}mr(6jdI!an}58UfknSZ+Lo6He_`D))iCx_9%QQ z3%cXOteaMOl{rZPfM@aFv_ZZ;JZV?k(eh*@a@}V3ppCV~aaD$yE7P$W_yO*oq$rx% zm|Oa9?0bbYS6Zma-}W}2B^3F_{OEF>^wA8Z9%Q(<63aL1A-N ze7b6~?3rRKZwb({DXYnn!0fb?W@@q(DdMz6rRr)Q+PO1rkeP=kRjIf=7#Q=0Osy@{ zd_MAPFLe1s8GKk^(-1edB(t`(7SJ9IIrSCJcy6_n)h+)FB{n@|H=@?#u~7}9-Q4xA zZxR~8cqrbUTW(|Cfy9dM&zLiXYcp=}h?+5V3i8R&%0HAy#y8NRJ0X-~TFo3tOp3xy zbThwS-1gnu`b>h>f+)XP!bg7ygbfV41OkbNT}D|1+m_#*slWVd^LDf0_@hcj8D zDL;@+`#Ua?IUGnF>=r8xk-B{`RnjZvEHe1+-1|X(H{^C@zjjYqet3V9 zh^C{cY5bi&)mf0;w#bdsR-659QCjk_BXm0*ZhYzEBp)}Sh6L_|tI8~i*PRZ3E|#fl z`?MK4GFeDU&rkE|qvW>BX7A&E6~wb1kpOZoEA8+x-``;;-pFtd$tYw za9t?z=5d9bn&%^m26Vlq1*VHXF?+VGph`;1dT%A`4d3!x5s_3Z4Kp*F;DLl&&A&cH|wsYNDEuvy8_!pZO zT}+$rbrs=kikJ~As!g^Ee3=zZA7~al@w0cbOM%B_W#BwHo(EIZiy0lnedYMZi|ynA z7r6MTMu&^ZPJ1Rg-(WrZ>$T$<&C+#b-6^{0hw_5tf#k@j1$h%z99}&SX2g-ZUBz22 z8WUUhoIkAVb&D51q&+glVSlmHCg7*;bY+f58j7CV9ye4-DqrWBQ4r~hMf&?px10z` zdV)cc@9P5!Cm5zUtbDINY}VBuI?}!1;(ehfL+wjIe@xCG!$Vt#&x)K5jNDGCQOLSA ztF)V&7e2g*azteh-nNQh_GY!oL-@NWd<(p*O3D59*ZG6?!XfMx1Dh1gek>Q{R2GI8 
zZHZ>&8>$vHZu=*dqGj$Vy%N65=QnJxT5x$eyd0OqpQT8^<>h?Q<&EL`4d?2g#61Vy zMH%^9?BAb|(ps&F=iE4MMk#AaL@_@ZDoe@j=?zd!^`JH48S%IL?s8LdJjHMK5&p^t z8@(UWa+s5Ry%-(Nu8A9kTQvSm&L26?Q-zM-_%NZjI{zcN*80+yO0@zeuE$geRz`sw zd&bC2L+xj;8D@gM=@UIxO}lgbFqP3Hm*|uEP>BSUgZVs9h+;Jm--2Dkr9&l5<5m44 z_@5BHsqsl8U+zG7Ugi1-^rL6b@kXDBE^}JV8mpYg-@U2|&m39M_@y@d__gks>I<`P z%P8E9bBVb$nv%E~9EvsKAMxT_lDx0*Mxt+qH)u;|lX;4)zD1@>Gp&{&{eHB7PiyPU zkr0Ycro@TH$UK;zSV}dMWt&tizK`(j2eK-DN#7a&Pdgozx# z!{2&Ed}3IaxMM90{tlN%^<};>E6UK+$kC697wsE61iw{D(H{Q!#qDz40hHuHPa_@_ zj%U%&+#~uN2jp<_hFwO#Gx*jIzX;Z(qkKn5JC>4!6ZeyX&#b!rV=$nHM_)^j)YPNEb@jNp*=KJGSl+K5uJoI(4Pkd zwBtR#O!(qT1~y0oeN|oFj{8T=b9C>1cw|GMXfT#N{Ddy_xRg#0aav)o%N@$P-gm*H zJ;xlaQy$eUZv`LkNmO>8VPxU?DyEbywNe}|Md)`z3dNkC_8X}S#0+>sL*P9;4w#>oa;$Zit|2m{Fcr@Hq0mI?Y-gXZ5K!aLXz5r=$y|MC)8wYi7vF zHgn5i90;RZztqIY8rk26z_VvZ)_6Qhu~`>Ec_JogkD}D1Oq*(#9IVG3H{!R%SFEeI zHm8r#5rluw)^rm++xD8VQf6j|Qp+)4s9;9-p1z9gNwaGz=KZ%N^p>7XohRmBjB_Vd z|J0WCW_%V$@v7g5_Ikcvo)}?4Y>&&@d|NNx%oq`qo^Tckdn6AiiB$GaKXCVpsy%e@ z#>Qmayt@+ODwEVV%@8@Vt*JmVbJHR(6bY`W`O}EPAZ}Xm^`wwb!J2v3LhxK7G*4$M zt>XpI_7FVb(D^P)l5N!Z(ieM{#4yNU1)88N+3J? 
zLRwESj{qoJj@N|sha!U8e%%R`aJ)y#G**1bc3tGi0|Y74?6P4aqAcxC8%`0mLp^J>-z~l z)9Z8YRPaiSin+OVh;Uo;r*7P2g}9CRbLba4?rR-`wo*zP@GIe&W4kMh&8n3ya>jhA8t=IcM&tB~{{ zze*e2>6Oq>_KkX;N*YJ=7|n!t&+#`}evGQi?4;Q{mq8|N?}J2HN^j&}d{QCIveG*# zfhOp4Cibpvjv*o3H06+4v0EpXH%Qb4e z_00Qb#=o4aACg{_C2!Q038yQ?n_{byH5)^6`+H|IJ-NzRcsP!xe(0y!a~q_>;Ts9$ znu2a~BNiM(q>c%C>bB6IpqNYOEiA6sk#i#UPO8 z)2H$%+WM#i0c`5rn%UXV=5%Q7%3g2P%t`|uSNBJS*m%i21sug^_p{Isr8 zAkt;u@(V6Hnmfkm+7<3%Owu%^6mM!ep3S#tkMzcWd7QynYC2+aT&Ch3y(V*?s_OM1 zg>8~@B~yf!)_3GV%M&P0(^E}o*~jRcuT~nwSC}&p)B{L=?)49$3hc_NoUv*twewz^ zbMr=3bfIceH0)H=x_TRoIXAzPMy3qkGeR{mqgaa@ZW90bSO~=Sk>!p!0B$;WLat9Sq zbe4BKg@rhd=E2ZU)NxYO@kAm@xfgY7Ew{!JxUN6_ndaNxOVKdM-us-m~Wli#Wr_~jnc4_M;rhamCYkj?Xqk6I`K)pngZR72ks~u@L;T3Zef^7dGaO$ ziv4Oz=p&Y5*HAVHq07A>BTdo6k}k2A|5aqnV^RBu#O-D$!Sx=Q3EadZx;OHzD7 z%UmveOp(>VdovuxP0HujaPIV{^mEGtuhl=*&(|LRh?2cR|Js7_!O)1x~Ld=>~ZB`h57lEBGUeJeqB~}D^`;8 z$tguL^A49%i>`7h*dY}H0=3>C&A4*7NQyTnS7vxG#)Nl0J7MZLjQ}M}XzsU+6x38m zLIolOCr|K;q6mIYlAfd@!kEs;VH34pZ7TYRQCWTtp3a#qjkF=WTnQqD-%;EOhP+;# z8zLO7eSt^Jr(~K9^;Sf z!Wy5*Ox#iPlxEKpWogq*Q}Dh3Sfg(zDwv-~2>T++JMhln7TD*|$Iob9v9LHkArUs= zY$0n;in5=LxsHF`Y%OTIK*%LSYA%5P>RX|l%Wr#vnE0ow9ssLr$%NL@%0GTE@;vge zw9d^CL*qb0nmkzRs$CUA<&c-l>&ubCHGFF0maG9!uvwcs*KU4S3WOz{CP{ zxh;~#K=tN?f!{`})u@iDy0kt@O83;ZUofJci{T7+>V;k*+ttEfp>4g*33&nCkw0)x zkyztC5W~B)+8*|NMW?nbLP{i8%fjkZ2)BG{n(&qmZKy`qi_`i6F`TC6+U6xHSGYSE z&xmbC#;^pc_aF_aE{IT7tJi&sbYaMG45p@}UN?>k|0R?|q?t){W2A$MbJ)>I_sO|V z%c78}ihADuONjpb{P~bm-Oa!7vI91aj=ZF3?y#Q?e2=I=))^*(GP2{AA);c&P;*?N zs)xEKrJ}yziGtGrL6fDATr`f3kw%Tmt)E}&NV{=0e!X64q!oM4M-uQT`k`#S#PTdPhIYv|eD@aeG6SUZH5py!;mNK@rFyU-o`*`vvNTqZI? 
zI-yPe5l;crg$GkFc@P(G8BVrJhSjGu;qYH+V`vs!{z=t*>cW=WSrlLXi+A-&OihGd zKPXNll1aL{vKR_mGq$5uDJd95X0*G>M9&IrKbh)r|EeX6a-~aNG`DPd%Q^l++)5?h zk56^c%4Cttoei-#!#0QaFi@?Yf`M)*ZYA+vt`&m3>jL=^3!gy#W~0e5&28F8r^CLe z(wd0n*f=f_5T}G8>s+}$o*g7CN(gsva(hG7mVELna#Mr^1?Pk3*2VL0Z~6v}_sfQ9 zt_0&Vkq8el2L@C%@f0sKa&;-aty{Hf%$omx@ih5l$l%k%u}SlFK_B$*wyV;P(}=Ix zm3Y)w-(O-&-f-}h!%0o&(^l#rL**Dd&Z7Lu=I!sZ`oHyFQpB9t z4)UWu^odvNP`&OU9G_fWulD&-R?fH+G7aejcN=R)m2z?yu+nC zDV`BJ8yIiTjGeS->gbmaixbKb6!J|;|MWEd$)V!Thxe))iF`hrHGI5;P-?WmuW{64 zeF|7EF9NSJxZWY?sDzoJsP!lOJlT^}{8rrezP$)6mJl)h&S840^F<&s`2v!t7w9;W zIGQ3r15T2nP9_pP|COC`^QhC@cwW&RvMaq~MM)&SDSo zKI&7LX}%(IBkY|WWSN{PwP_Lw_OkCmJH`tOG#pej40P7+D?OrucST9j^WrbFoDB`p z*#0?-7bx!BlL+c=l6Qm#zfxGzY#4O$2Zgk6Ne_hAQ^tV5HixGA_(l8@HxHnC;X+wR z&<^1!_Gvs#YoShFRZ>cSCb^||>dm3_4?a%)8Rbh){F!*ql`hhz-*uXo2@llA>^o(A)MsPb+~e#)JC1WT1K_HHi{uR zvn&+W3Vb(JYl3{V*XKVJXb8RXJ)L-0`#Rp?{L?MfnZzMf7k+%vtaLi!&xS9V|HR#Kv^!c`$GaQ% zir23kD#=R-NgoT{4Xrar%u{o`=A^<7eI~mmxf)kVu9mba{~Yn6gKb;y1>%ReoWBB_ zZPWd1yW`2^Ev-4!L!DKv;!I4jIYlZa)zq=-9d}DUStPf0Nll$aRyx|FOhO)-fHHHt z*dgr>ttz7rCtQx5mUE9gEk}w{wd6GQzqe4=(zF#sn=+M%Qq{HYW_d2%sb=2l@FFZt zBgV4nP0#!=9;aE>NB8DX*^O>Yen-Q=jZTNugSq`e*xF9x{jHtc7(yk1keCZ<;5wZw9XFq0`wf zZaf{Fx-`8klk~N=3iRDVw{DVc*=?EU%-d9YTB9U-0Jtk;>SXtO6hE+M!t+y zm!0{ccV(rdaLF@6Ks@fz@;NCsFUO5#qzLo%NO^KHl%xpaj;C;OeMn}$+<1yFHGUGx zWT~T;RV2{nhe_M{z*Fj$fv2d0KnHN}IG`4>fZ1AO)QZ2F%M z0ROlz8P9=G_nw}7*C&gwqe~Xfixqw&du!zzV6%3DzVoQvdWe_a7mJJ^aq`Cwa=duJ zyIP7lb;M(1RX^)EJ_^3A?-G0$459$l&@o%V zL?N!kcL(WxdF&uTbOe9%1KzySlC9{|(Vz})Vu$4O>#ir72)yt#C;3_qe@AxCX95%s zinrp{arKIqoh^VLW(L=*m&w#}US}CXmC4Z(L=+RHMH0qnN77oTwP?G3j$W^rkN%AZ ziiFORX*VmGI=r+2*l7c>qZU^(zk@Wtsd`iS=D5yD^C2!oPL4<1VHA&UY7~bZ(j3+O z!&x$!#N27rxkipFxy9L28=tV7*nIYl0?z~&QK~1U98F31>9foi%nlZI%GDhB=q&kL z+j(xXfKnw{zw4Jd)#Ih*f)g(uMj@rC*gq=c5f*{}b<@Ohe922aA!nA)cQy@i`~uaX z!q80eCuB4g)4jcGCNYOT>TuNsNRwZ#Nb^&>V3}GR_`~XXcU~u1H^EO{jp8nUiKNx@ z_h{XF*fcz6<`L|1G`})EsVE!LLiH56pGa_7P*Uc}DpM65GtDTIlgvr&kyIv#*H=&u 
zk&~3A#go-Os>VhoIa_h|;Hnd?CtYZQ6wO$-MrT5}_sgs1+ID0h@QYa3eGxA{_2B5$ zniqXZ7U>%rYQPt{`1(Whb>D8X=W!ie!=w+g|5UfO{Op>@ljFWpf* zxZzgTuN0-NbEY~aERTKZEf+NBiTJ`^P;4aZKk=BRl{e%e4FewuugD?oqagH;op8D1 zI4dtz)I4dTaElam%aZ%C4tKPL`hB@+h8UyK;(I4VeG!E{$Bj!mwezWzLP|8k$yr}j z_-Ee9D&^~#akL*(UEw_B{f#HzaA>uA z)z;Oq>x}o>e5s~%e7hLMmXfs{U7c5%rfB!C(*p?W?rg?4_=r2HZ#PaHimr{gKy^*r zRr-2}+?{+S)b-fU+e+lugg#6iBB#-kyq>|~ACQg6jS%oB>S4(xj46xsx1#LUZg%Tk zGK~zTb7n<&3M!i0jGbrBsL?`uOVeJZLzCxp%y+53KTX$3UnXHl%wUo_{@ZhVU0eYF zzOK@JG4eH;vGbUoD8*WDBCU7VOZqjjJZ}6#=b`V~q9+W#1^@diPfD7Y4oysGbWW4H zJdB#5TYZKZpOoX@UtIk4)yxeE64c(tN6x?1ztKnc?}LKL_KX{uX+Yd1ko|uemYEq0 zd-M4JW!O=tbNIgx**x-;_W##c4I#-n8{|3m;0FEKk#qQ2HyQsgqfV$kuKAVq@VH>g zzrV6Oj^g@KdLPsx&QuZ+G5Z*8ac^@lU4BhQ!r!xy5_j(v$IZa|;qeYOjjrR~>Axr} zh;s?#;t!849vVB9sPsULVoho+utPZUC4J^O)6Gn=d?{-0UlhLJ0kIEk2hqf16gV9C z3x4-8gW|ZcBuwMB7%009AH`j(XM-jJ-(zaHFLo^0KqCGy@?3(_p_Jp7-G)^c{iMOu zFTZkpfQ7_SHi2D|@{ zx1C`k32uLYk9wa0>+wl|%`~+l3I42RpZ#_Y4mvJq+tqW`JTdTkAzLGN-g*+4fl;|y z=JZD?;>RJ+YC{9NANQGL8*75K_R#y1;(_Wn=TiKQ{J( zYz*7>Z!_-LSPHT+?7M=BnCm_p^I>efCX<(fX95RA3Lda$C-7o#eBjW)b!uAK-t6v$ z`1yWE1upw^tv)JRse&O7a$dz9uh}uQbx=W)R-BhXXu zTSK*0Gc+FN%{y92MEtVrFmOOHKZr-@HV6?hchr-TI30)=%;P3IEvB;0_RCWsz*G9q z<6uqU1Ym*Kx1IgphxYV8jy1ahZ4?ZReFMP?%^!?;`)hN1ZUfs(IbO|1jwS}Mau#wi zL*fIKaX4;>1-}jxDZRs$$DG*~ilxcj+J~w;*%C^~&_C%#g0LO5;ozb?oL*t&5Ks6ofF1NJ-q0@nNyTJz&`2Ns<17eYGNq~edb zF)zrC#ddU)eip&7j%4bq(1GYf*t-{iu}%pRH}E1TUCR8+6qwV165Vm&XH7i4zUP_# z@*oUYW7rEY^DcA2xE~ZMf08%At5&wSuSx%6za5(rwisYX`S-(uvL!WyMt7jpRlNuk zv%S#)yNeGTtQ7m=bjN%LSe{4;-F?xuiO zqpxifkOlw+OH_7TaW5cXnNR=D&vuLp^DYoFFtPdH8A;EZA>AcX5!}6G)nxD4fuYt; zOuq3jaKle_({p*`sl)#lVFh8cOC^B`Cc~N_?plMf_tf8vNj2Va^SV{D_Y!>{E$;tp z^2PGxbNIJEh&}JSsIZ*J4*1T0FluPl?-9}rWwNU99s6vuV>ICZ`EHg#Y?NQvvwBq9BkOeqB7{5pd4KwEP+=E71r}K}lA;%~Z+aJ6+ z>U?V9>X?PEIq`~Uq^H1W%_0{oifDr&KgfA>_T;_&|A+iV_e}^V6i-T@w zeQzzL$-%TXsA|F16+2_h1-Y&|2AO+aH)^e`Ol#n&wX=d5%Wm` zBp0(+?6?fr@gS_$twNIN@NGoyTL%DV*OdpDfcM5*1_?C0E1y>Z#S6*6U2qf{gWxb8uzh4?&W_cpe+ 
z$6ver=!q~!C99hKK6g}tBWI=xar;T4$;>FPh{>SEeM>?DkDe zg&`pKE5T-s5%kX%+lWhJpU>HyU8*NY{b5Tw;WwwL`y-#-nDi@Kv*Cvb;x+GR+-301 z$gI|y455-sPnY~`l=ujiWKvos9awR})L_EuILu@{6!Hu$E#7F`OW+14?)JuZkJMl= z#D6U8nYS5O

@JeMV^syc}p{QxqflV+!JVY%C*U$6bc&4V^+7lNQnzau*U8G8R%7 za_(MR)9PI1erL#$D?~tM)AU&@xN(jpNS5P75Bw-TH23RJPf-;H#XH5sX<%_9vJ3%e zzA@P|cKS-;jY(YarQo_T>Bf`@*zjqFmU-W`i&_cFw$zw?7h>%691q_6Z&$V_B9T$R zLUY{E93qS_r}5x9)_11@mLz@~Cpf#`d{`?&7p>gTnjQ+q@X(e^dEiTESK;fB#>pn^kc zJZo&5SEqGIGo2-TL(BatrB4yWV}1%v3FVHqTU z10lE7F@^g^__qSStAj?f7PKPpB^RsrR>-9jGySiSR-{{mpJl~+V5o2oQN+K|OxHmX z!AF2yV?tCdSTi@eMSo?gqDHjIty&ChZqFUBD9X7`ETEr)&C8mf*tnP7aw&oBc8AKJ z5wHfC>32enRCZ(;PI-rY;ydmwYmwrmAT*U{hs5ba+W0A>*Pu!&XvTzigPj`x=SCdM9f)s$c6SKs~-$nAA;4prHxPun-3dW4P*!3He_5fR6rGvos3pmjc805y9 z>0ms?T)3}AV*(MFt@BxDd8cn@dS_H;Y3Ge`6Vy*(2ys3ao}Gn9hw}jlP$&msFUGIh z>}>2YU<{iT#=s8|e=aPW)Yq^JOlieL)XqiHk| z9w6|4nH&Htl*0sf&ZOx6rle34k@)d*DEr=U^<9Iq@L~2ARTJ^_ivJ*+t+@&qSzGpZnIk?#ip?Vw>wp?y^42bWfKm(O zL9Dn{!YvSe*qGvmXoySAbla&QV;7LN&V!IMslgBxtWo#zm0lxZA}FiC283e@N?ki? zk_A1ryXJMx(FDbDOuWqLLeYoyC#_sQ0z;+ej_{H%raGjOr7ETJrdp?#ceaPNEd%!% z&NR6>474{X*v_c??*q;^Ib(G&|!=y=UY6eHi zg60E!3(MY=*|AZ)_clOLrhVXa{I4ib2w|(a_)|yjlV8KGrS99Z0w&Aei}Nl7nGN#F zlK_4D_YpFfm&vU00NX><#S+<+0C?eE!ztjWQ)G>pJwDCPA$A4XY;1XI0yb@*u zA0i4nu=yO~f0$SVtc}lG_XF(wdm3$QJ)3@Bk^P@{~kDdAetU-Lm$1l(Z1VPNBf zWT@Zf1&`RQHB(*);leU-8nB>CfNAce6+pCicMt-RF6hkvoRbiGdk|xr*AOi->1=l9 zeXcO%u}$9s+yX;Pr{zFK_$!YsgLO5ix7T72yOM(ycUzbmrli~S0Wc@ueu4*r7n`a8 z8bPxFWW_9)4kBm(Oa_TCVVOIH>%^^Oy}5J-AolLAup&YWRtQs&JL40MsB@Uqz~Q)q zzrbM&p)I1XPjX)#j2((YV0hY&v+inx}0v6sF7rO@Y3Ck8}r!F$e&%5CjD9rQrI{OM>Np%)0~j zf4~C5PHp^;kjjFwbZsYF5x!EPSz`9!x9W^^2Z0_zXxD`Zu}XTGP-EMLRAVGaipzcJ}+2)h9e0{#i^ zXu_meZdV2Tb-S=Kvftxw(^sfe6DGbAQf)*ak=;0Lid41VjJtj#Og32V8d@ zaUig=7!s54R68;W%mUT~6!8UE&k=uRJ9xPXn_3-`V2$$C55kv*&jUn%++^W))E`y^ zDI9>|_2-LIZ(-ThKP1I@$W?i~t9tJ(LaKD1Bkch^=$;*;?v-&^CB?CWx_}qK*olpD z82+~k!~;)1i;*TlPAPDc2Wa+N(Ph5cB$EpQiC95`G-1?A}morieOyyH70v& z%g!Dm2}9OTq{yNW=iip&wtC5-R!fNz*q(G4aE= zvK9zkEBlN|F-ne)U_k~@r5n+ZD&rG;12&FdNC^Rj}QV%uH)L{CQzc80-Ua) zDjHJ$V7^?8i|`dAo2mL zJ+Sgoph>}PU~)}h3%$nItsou4Pwy3SJIdA0#&*XR&2Ak!I>vwQo-C5V4 z3fLF$cq$`Cjk#cx?|rx7&}Ns!YmG)2y#ajiA9bAPyHppl`QB*VrMjzP$DO#>aj)Ah z-UgMNg~;u$YddufYFO 
z$CfF9yz7O>w#8cXkJ^Fs)s8#^8~qP?CPV<#8`cgxSif5YZL>Q&r-=cUsSt@o1V}2U z;OqnuE+D+%3=a{?Dq(TW)4yrU0&M5rKBy6H_y4VW{{aADP2grwBHJ9Ik zv+uvzcvWndc>yl^FEw4P1fGB@ziv!HBjY*iGS@MF2nrgI+kmw~%oQ$*Kuih_<6ROC zYvfAGE!8z1Y(>N#)F&~uQ=q5z)?0T|=DzU7mb@Q@Upl@F33*?axlDkqfbv5Lh7RNh zoA^giEhiKf7?8U{1$Y$3hv#ljke&8EgEb5=A2beJdD$}wkQRF(&_C-vP((v`;8w9=r%R|LnP?LZ?1cs-*SO76uEC_rC_li^&@7NG54U3V`g(LR@ z0j$iyFqmo=5cn`FgEs@1JEklG9~)!}EzHBanrxT0K`sdP$MScXz-@OI&9SB!s=cSY z0!0mMZ~{;+!wfA9`IGFmfj~g&J5pgO3zlJWa}6pxKp_h1_&^0>&mMcYn5MwtWyD^9 z1KM*(I<}~{)nb6%bwvFi%D@g*IEQu+ULp26W{RZSiOeqbk#bH7Ub#4TUn=n!t!n)nWgQ;x8?H)R~5ISL_G^aa8 z#RK*$1D9f;(m^E_i3W(=9mGy)!$lsTfd3iJky%Cn9Y*!PLS=Y>4nV6FLIM+$4EMD! zYBH{X4iUu6|7cz)?BPNpR6$hbaJ60c&w{8dgx>!HmHiy@BYy&rcKCev#sX*?P|Gp2 zY#oTYeyd}>=l4gQbS#aS+P3P1XRu7 zDi3BkKwU#6-n~u$hJ)sXd;w<8Zm@u}V+xY({;NVujOplL9L`x4(A9UWdr=62)g!}jE4vxshxZWsa}2;4 zth>M!78AfYb^&Q$m}Bav`*3p?@38$pyT{Lx*+uetu{Ex2btK}HHyYan?D2{%}2MPX&5;!Pl( zs56YADi~BOhs0JW&}UF<5AO3pcjEvJhgDpTz*?Y>2}+=F<8Vp=A1fMxo&jntA1MG* zzwN)adEumv$slx)K)`{N8r&6x+jR@*6{Yb~B&KX|=}(DzZ<%Q)+@T3}+zSJRM$j0T zW{7RIsFV6mR{>jB!1la_wx5~($ftwBZxRWxWVhb4=8fr6MP>m1JG5?39K#9){5~y(J7WSP##0AUUcOPfki61Px0P1fzx@c}=TlGe+En8fsAtYj< zHT?G}-sG=AdtV^#u(hzg=^YYOkb))shk}LiXGhdSPlL~R9OKxn)Izxd zs=mWjBk6x@D}v;YZPtPx|9erO9h9Xm!H|N<5_`(880`S<5fU4)AunOL%T8q)ED#jj zJ+^PlfL~@PW?%*pM`FT}MabrV-MXIOVYHPV4nE?s0uUeSU%(r}GD~b}cVXlvU}}z$ zSTco;6DMA*a!&+>@^215&zl*?`smW1Cx8hc6gZ$@Qhwtf1r4l%fj_cwYLxzE<{y?Tv}&wqT72IyeW;l=d!cEAF(Bj8FmM&)B#%y2&v>a?|ZVtenqhyaBw zMhcDO(}9wnvxOqu?1Zc7yM=j(x51w)`rpFbm=qcSa`(N308lA{K6d17Z0iJn8w&K! zkhN`%ojAtSDKJU(Uttv^xI~GtKDRI9!^{tAhJz}>6S(}iBmkzhL}! zN968D0f=1c^X!sm?7Y576by&z_v_#MdyAQg7HGk^vh7c9j@+T`ePW? 
z1;-CmsTsKn{{CafPInKEX0VL)T@1i}$R`wZnn3vm6Xc^H1YsYA*8zDv=CcI~^1qaDR+LML$lV})MzXJyp0-w2v8vq~XD&r%-uz5R7SA~xN>K!_qhIJJ- zxlS(Le-ipvq{CkV8ftfT%MEgaXvt|qOsxaNGhkiVPc>l3*Um?};6o-&!5#rN#kF}s z25k7;RtvR~F;2g)J_EczNNq83E2cV7G^-t_Ftpa3%FG4dW|B@N#4Ll!GyutrRSh(0Qk{!&ML32A-Ta~45U0fZ4$eXSsy|dcB#mU8`$JKF;o2Vr2x|DXLM0i}5P1R8O#sRwRTS#GK$1mD+Vj;8 z)EtHrHRR6!mTdxntp5`hN7B2T0l5R|NDgI-a;#!>9A|A{SQg?A0BR#rF2~3!hh->K zltJJbG|Ybt0%__0nDodC6C4Ze$f^GV{2FZC1Yc`|jyja?Vfm9u{7Jeq|1@u5s$cjX)*yt#b-UNa`&36Ul>gKL)P}gE|gEd5^Ooci_3c zup4S)%mbT0ICI;XaLxVs8S4NShm~t4*_YhT4D+M+~~~_tH#I^LN@<|GGOSa z3698As2=ZO8^OvO>UJZIk_kF^7+LKmZ%K(b7z_bP88Sg041|0CjH=Dj_7GwYn!#{- zBa3Y8f|^2N6M#W*vCmlC_cuJ_X2JfTS6;1^Op+*pr-+4&=J{BPzIiSyv=T4{tKCIJ_A0JkwsHPc+<|wf9 zzl#wj)k1qjVq8c9{t?v>h8POrjMvR59PkGYt5&E(ghHV}=Cu;j`xA-oJp~XW49cL; z4v-+D=t!Wc4RNTp3qiLKnh(WkLJ}V#S{Fz>LA-CO)O7}sH84?lQDB~g?!)@^sB4Pk zb;ZDC$T7hj5%K}ZALG4LvVs&nxIO_Bba(-Y=r2k;l;c5^wSJ(5DE2I5fgq}ab_~p% z*&J{j{Rp_)V-ip!9!{SDR)ne>A)VpSfNTe_SO47$fIK27<3?PlKvSfjCf!uR$@uEhNCbfquz1 zrNC*T;I?Cl7}8OIybsBuho*v*6+|;+mltX448SWwVl;;i z?a1bl*$L1_Bz*_67*spqFi8t-|3m19VKvBP{sD|1E74F^4Woj8=0|~y{l_~CNXv$B z734~hv_O<*{7J}?b@YcyIDnB+ zO_IYO;D*6gBB0oV779cqEFFaXy3Ux@=`VkQkw+X~L5Q|_rnu459mzIdgr*0&^jClD z9P6t`(4B$5WOFR;fdnbT9q@3G()n%61&WkEcmeE80bYR9CIM0G0ZA0q27_h4@D1j_ zK^6=Et~gU92nrp=KRP-Aub|oi1X-dZrv7m>2h(434W=W(j-Kd0I+k!GK%z>=Kv^VmfGULnHSFmu`?n+3 zHA=dmD*gfm5|CZM1l;3(;V`}+({K1M=)xC#7YHWgwY&W{m<(>+0Bf64I7&BQd>PJ5 z09u}Rq{MMBq68w?(UxJg2M39~3-&)d+6ty4M~hMeR8T>NZ%E@HgbL~LonADc@N}pu zg7NP^HUK+Os9;-0*bJ0~Qa6zKw918#DG2l0869MfOH}b+qStkDXLa(sYBMoqll<^8 z=BE$dJ(yR(l`ItGxk_gUvkMLzY>)-R%{j0m6omdsdmO++DE)N#0eGcv0fu;ZTLjQ( zKtzL_q^X850Zg6*w#fxjdRRUG%6$6NfNg++5OxJg&CnGF`*(ks`U{*1FsX4(0_AJq z0to7Q9hUsBuRw)dD&W`3Ie(c3!4Z-))N+DtAGIY&+u%R~!Kid$Ly& z9Kg+#&`?`pm~8Z)t_!7}NED=)VO#=vX9(buU%=s@5}8lhP_>3+*g{f+?DD98;JaUq zR2}VOmofy8-Eu*twWbI2M2 zVFCc$S)B%SNMV9hg^`s54fPv<32iCP%5-LLcq<82Le9eo7}ypJXMu7ERYcmI2Bb;= z%mEf3#6$Ez(k~m~!NGksXW}nVY4xAe!W2V@3<-+5!aMWcj$xAk zo8$5Yc;15-a2Cu2e8+!^d~8@ 
ziJ-oP77TjB$duJCVSf-}W9BMp0fk55G>2_nKnaWlbr$kEFF??t(20b+kLUY6hqq(k zDgI22v}02Z5Guf>Gfh}*<>!DsfkHJ{3&qrh54b-NrksNG3h)NE0JCJy;19w6urfi_ z91zHk=^KtL7~zX%M|^RpvV+VsR2o>`5ml@%>EMXW9|&naXnf>ZSam=(9MHqn4>lUd z(z(H(TZ)HN4FHD2PT;VHL}6W~U0VZ?fy4C0KkI~>T}Nh#WXBHxM*<%V*f7+*O@UPz z2E;&)vq3T!c_WfRJpl(*g zPFEXH6~Ix$7_7a)E&{aKUz2fiLiaWy(-Vfty%4GF_>_@{yaDJdqOGVrACv+pFL}s= zG#a2bFB%el!*v+a1%t2z2oJB*IZ*_eLvw-~hp+*FV*zOq zaICPun^Rnc7)l&Q3FK82IQUwCG`o&3nl-m zrhjOS(0L*4;kb1Ic|xF2C!h*Rq}d;76s%Z>#a9Wc_=1k~5W_?<;ZS}b#r627_Ca>u z1K&Ug=_kAbWa$RO$)E)Q{RMr#xd7#-=)TJkz2ZfK>yLNGC{2Jp_9m)I7sL+Z1z!kb zL)twu+}giAOc#)5{SSLPvP^{JA&8cF ztapFh%;CNQ>eIu54Qf9sfy-7fF&|z75;b}B!#z3=+%|ywcZV4Zbl!((D=M4)7sZ9! zK1c8}^w;OZQstkva~Lk*G6dowK!*kuD1YBh0r{Dt*{5T%6&d(Y-Eg>4J0yVoYusc6 zx5Z$`?vYcrZ_IY4ll1O)i(x#K{H zk1M!?uW6wK@<}TII{)<#W&pbWAEEsb3h0lL{2{HwmkL2VCD^S0m!J+p79i9l{12|p zww$~LfH{ zN)SHpW1bMuw}Gz#c?8hgcJhO@ z7QFAvJJ)+IXg|(5)4M0_T=HX(EB`sc+=}$hmmlQ^k5lU%-t67UDrH^y&8mNz!raons(|%WGSVPoOV8CYrm$9`!=B7V%gH3}?xiTn=eWqxXVM}EWA>T*c0R(vl>(%p!MLk+h@KgjsvIt6Fy zIF(t6Gp&t@T@2V4pF2s=>)(?QV}K(-nxzs~%AFSY?XpiqgObF3v|^LNiR-u|+v(T4 zqQrlYr5QD`2&pK)jV${1_VM?t1rd0a{#+e{L&dbuwHJ(rL~y=szW8`WarWogA^RWB z&6MO=1z|-L_a@Y2@NbpKizHyQ7B~9!q-|&M(i<5(W5bDjBd@&Ch!f7%QC1NyG4_77 zppH~w6&+eU2Ac9s49rt#*l4(59}H+63e)6oh0)O3znnm$1Wz1Hx$MoYOspRp7}!{I z!9RE}IhdKozq)BY%1v6kz&>Xm;No9HN_*AkgN<6jPudT5bVk2!ReYc^GV`y^-X5@@ z^{EIFPr5ej88|6;5c7-=F~EM-tn%)ghG#$Aq#0R@o39o(f8Q`CDZ3b$`Ou&_Fc6+<&gU08jrtwlgQ_Y6OsP(OuwYU zpFR94d!4GPQLFmLRR$rK)>3ulFW3(xdHuKNuH9Lezr9Np|6n33+es?}pNyTsR}_;q z@KmCB)=jhS8xIDYV|i)kh}^I@X=vLp53VvZ6}76L$$mao)as(Sui!$?H~8Yx3<6)K zt-C^f6URZCNP;;qRHSnl^PC&0z`S0$wxN2k*m?Kx_31b7+^##-{opd{64h+1ju(sj zs@0_!bn>;rBNJbe*<8$Y`?HlQW;9$6$}9)TxF#&mL=jMv4Ml#JeDOk5#5v`9__l(6 z3&{yl=XFsl(>45)MHubZuaDWRQQ+H?s2V;BB)+o3o;Bn$Mm~RC+3@R)0OA}9#b$#a zG*M|Ov-!#Rn~^)hHWb$m{CETKi_3IccI~Y(gfD(XNGmib-xi@!_?FsR z_67VyohB~0a%6o)4^Gb)>|V4glxpg(rN&pkX+Lpm=gEz>M`Dz=_PVR{WU-33SC@Mq za3<7FPTqSUV;S;l(VSdN)|75GyN;iB{fQM{yv>Llk(mv-zXE~fL)U$41F=7(yT8|!_$6_Fx%L1AlmmVDy_}^~#O{jH 
z&cog3OR3;nJ6(u_m6-$gy}JE}2b21S5(>?1aHz$l>$2AlK7 z_0pZLgX$7z|BLi%^B!LIICs|P^BgvE4n+N@1~xW0C`Y()srohW!BA$BLblO42Rqkv zwOOtZr0v}(S=%YfB~j(N=6Zl`-+oTHrzB@|0I|O?Sa71G}uZrJDM3^7xi~)0#xZ-ux_#s?5hMZuy?ycqv*; zG{9$N^g%_iaxk6w&}QZZUR$=zd5_O?QFSEz6z%!qrQ))U2J7d=B-ovuDejg~YC6;1 zeofI%yqrh(l(ayoJx)1&GVOQo*Tr70N^92AZ-p%FquWc`?4~{_4hQk$OePZQ|S2tK-{4JUZK6!j?$ULA?1g zQNA?0auD;Tw(i$nVz0R`KI{It#n1E^q+9K@tCC1^=U!x96QQuM_ekUv+r5?gwwKnS zCMW8_(+!K49fIPVGoCtF12n^g4x0|!nXCqq=qp2Lm8uwb`sW_@7Ze22d9Z!p*L&?c zcD4Jbu)hmA2ft+YWBu!WJ4~y2sU5~cTY)Y7-En&9C)l>vdA99bC;e)Tt`>-+VOMlT>|{pZvz{a3Fesgf8i>CRiy$*J6Zx6XjyE>~QvN-1NW>EKM^XYWCP zp6CA2=xW_Ws*qTV+sPYBH{*1DM`%x%lOXQ8s=o?;kdP?P-I(Jyh}l3$d53cFgaqNv z#pb-XuTBug?VfxFEuWBt)NkifZd?uK#=eYuhkx~eRlJ#Rdj)6I z3$Hn9dzaxwQqlJq;8trVeZvXL8JO^k(m-Ds6N1D>NlQO6EuJ#V95>2A<@vER!S6}0;0DqR@ z&Cdx91_zI?i$dE^9$zhJx_iEJc)vdD2Y|<8>NM}3+D-R-4ag`b=L#5G-H2+q=(({+tIr$GY3V$JS!(U?!Q0y{kfsoXy;w!d)%^(N?RM zD;EzAIC0@rKmg_hODpb8QOTz((r7x>xFSDK&rb_}((TeS_jz|Zja2NXv;*!Cx7nB% zR+Qetz0|mqZX11eq^dj)J`sx|+nl9A#xs}%kC~|T%w6vF;;<1vC-tA~$h%0K!*MydjqM-GpuE# zg zw4)DnfqDwKxY#z$I;xM@<2Vph?IPZ$U`eX?DW=?4K}+!({wTHLH-@k4_P2r1zFipY4u{8C^{#v)|U)wm3bo zB+3>ZN^^;pbxo;|6LEAVx&im9SqNVUx$a%PI!ZG<+FhOcNu0gDI7~_16*kQKNsLSA z1S7U_z5CqONSE@+kCAb z7h55FGJD942DyWiT={fj+bK!FH$`J3-ZXJvDRw32IKaJgLFd5*T8ZeGUX0vMno$-H zRs@|gyD8OqcUOv_D9Rul#2t5a<@*LMuDCVcy3}P$nmrHnR4g5E@^u%fn5`(k0?FxIn0uwVr*b&-w?=Bm$IS(-xLt%+m?1ruvI!^MgR;=<=Uj}8V3gLQ; zD>yXQZc1k5N$XcbJ~woAe20jt%#}yd=$Dpjt~pyq35v71I#M_mQaYwuWPk9AjY`~g zOh1#n8_5ll2n`i`@3fN5Jdc(jD-}WRDct+Jw-yz0O({y#lS(l9i}?Iknjpn%8gyK0 zaB#Y3D7o?DEwyeyrw`98bHrrPUUZm?`6ude{g8LbM*Ty$4e@EtX%+WLv+!iZlFP2t z#&B;|cNwD3;**T%;Z>I_amNn#iQie}#GRD)K9Ehs$y`gRY`NWpJDTi2h;yQ5oaW72 ziw%!8iC3h$qSi1t3^m-p4c;n3=OYjJ(R|93 zmhFbUXw}>)F_(!UPvc1kJ#?Qbz4REg-25~7cBcc4d{fMCKJD|rWPn~(s_cQMT-jzt z{wO-zOI_fuds}jRm7kp4Cykdw4cvH~xNRp4nb9}|rTm_RwN87jN2GhGY-+!yZRC&F zJkipS6{a6MmQTCC`bzVJegR49d=~qqkw*NAarN)q3ZL#hw}>w|&%=@LB{c8Pf`fc6 zPqch5S-1DDh9oZSo=CORD1PHDw7XBep0y`^EFhF1Pm$5*2>a%}S+Z%y$#+6;it%w& 
zz6eD<)|;$-vduH(7$MaeBUO)nQ8i~4k*C{pW?8fA370wTSH|OcnG{|t%QF$m8&%Bh z47z`zxL{q%ih#fHJN`X2oOn%37M+_tS)n$CokFfxuX5&&Qq8;G=~MOTsd*ArEV9OW za%j2FP3fh%m!{k%>)p2v=0U^NKP#Vf6^j(23B>G3H16HiV0R<$vgjnSXI#HzXZ!T3 z)?For$uH+yGwW*C&Qwcx@>eZ~hK*3k7L86#g+9vrj)_|}66U*Bm2kI=Kqp~CT1!od z{w;oM#4X9Y%6u2jW|y9Ml!?udeUU9a+~#x)nv?^+i-DX{gF@I%7UwiZ1*3b zbEYRJv59jEN?l67{+xX_tf%OPr_)k;-MW-+RnH_^kvfYH9b#s zbL}<;-Gl^@6j`N3bJ5zfg46ivn*PDGL;Sj3n%0?eilNoLNnSP&>yk_6`uaP`^!FK*SC+~?-bB`E2`>NGTezs517l(qMXENVW zsOwyEaPT9rYFRC{vFT;{*qp_xvXXb6d7;7(vqpUyH>mx!b6zNPPIH;o4=qm_)@G*iIdXPxGPc@ zuCbF&GEru7&1YuWw`N&Ce_K#6#LmNRVYV0GYKdp0-miK8=7}8QQ>A_um^+dMJJJkF zT%NwcX!14ewk#J%J3ND;h+gTSk9rgfQL7H{YQ1VlkmuMBWZt$%=gG+p*_oAhHwmEe zkgVJ83+zT-k1D0QOajl@q|{txmOMqw z&FA@jxgE3k6SjWN#T4G2>j&}?zNWU;DeT-p=ZnwR$rIJO;Yqg8lA=xdE>jQ)Dq+qYNvj<@ zgtRpkVTS=(~ahj?J85k#W zD|PSd(P1)qd?olzdV0RNCUw00!uiV=J+SZwh?y!XFoVXdqb=GX&lHAD!M)Pq@P?8z z?AONwV*c{?N!}qvbLWdQ5p1#kn~sU?6gIO>;*ydJBL+e_4Ei{_4B@wUc6=;~DR@=A znWIBoJtxnf<;wR`|M>`O*oys8em1YRnXV7U8cn+68;VmMb@=yUjier&85P;fmZCL% zHhU7!sWrs&l(%<<7O-GZ=}YgW=uAVLr7lbt_`VFVa}sqsd7o5BQLiVEg;wkEI19<%hhK&6e}qvCn2AbQ*LQs8_Fl2?`gG%q4cR5hy>&QMPB%H70UE z9k9M`?PDZ2s$=1O{)TVX{%P$zbT4-||6iAeZJ%Pz-SMAE7Ewh%&+d>cqWq1OBXZrh zoiOh{$*=HUc0sx%fr{;&ajrMB%*p{%Q$_i~)(PwA@_uVP5@Fvp=;jlrC{J$>hdzoP zU`TD2_IONb${ya>b@GDGP1||Ndmfrx{djF9{aub+HR96}Zul3@T`QAJ9~xmU2z?zX z8m^`C(xRJs-g;K~B%yjTP)st}5F@@2?ev+_^Lo#nh^v@ruDkVI7Rf!=WnuU=;p*C@ zZm?qNw%GFv3iY~=v1lgoJ7X#Aml5y6qX#g~_^IDC$FsM2i55LlHMiC;ivxNq|2gzDDVp@5pz^JleB78CV8RJ`aW-2#dbc{J}tWz&!PaL z&O29zuU}{DD}JloZycak$h|ap+FmSspqaQxaMX%@Y@15<+ZWD3ZKW@IkFm0H>frE| zrXc-LyJ+!isAR!OirGgaW$;VJMWtNIx}gFA9ym?vYXBc zZUgi}0@zL0Z=Mg)=Go5Inh9j*-eH}`oY(FHE|s#VHrnGBmHs32Iq6!=M^5+VZ(8M1 zG1XS_3(%zdd@T_oGu#Q)=T&^(5CW!9rHLE%_F&>UYl&r8j+xt!efHCJd%~aC@+!|v zlR$HQH;%|2ef`pk?Y?-$v}Zq=eF?#ovM#ymGD@8I3oQ_!=4E8)uszodXwrE4@7nL^b1|91x{;L z7UeF;w={^~_-6KlgYS8euAuC_Rn{8ExeGJFqA#2^@I)5~^u85FD|&o*lw0vo>lSO# zd0j&j>Z^~#J$g=)EF^oK)s*_^94xA1f4R{qU02q^rg{SMMHBQ1?n|%YUEl)zlbc0DU3gwv459$;n-&;oFV85YdgY>|PX`rDL5I 
zZDB7AD;WEl#j*HU(O#YnSUIb*etqcfqrS0QnkU!{nV*jHCDeOMpWjJ%rs1^`RaZt9 zMVwXgsJo}V<+P>c;{uFd%H_T0!e*-&+IQc={)r%Y)>*UBpN5jH#Jnu zbh!J7MTqOvV^_Zd%xPOY_KiO08fJ8iV<=>!cOAorJ?fZfp1kVW z=5*^4Otb7$dznlwJ119^`bq42ZHYshp%;O@8sjCJgp^24l6!{tyTh(1XC=zoGcx6I zrVx&RXwrVojzrq8P zNiiPpT@BAqO5_|5J6hu8zj0*y;~Ja|mZ%{E6&tp(GEd%C27)VtWrU}COZH8$(2T#R z=RN!wO*Yl|iuKn+yt;4EZmDZk#ZcIzuVqu&QKd;WWX_BvGe^c{N>aOY+CNykL~ZvJ zE9JFsMn@(Wed${{24axcX4{YCTC?Bizbv@NJoxB~`BF)e8qXTmXjRW;(H5#REL&Zi z)wU6*O}?c{SZs^4MZBQM|2|(CBpPg*{ICh`C>0WaeGGrc@DLCPIGEF$vT8ICcEP3&)Q%x;%?{#&OZ0|$Ma;Y>)ZJ_jl?W*51bAld#^7q7wM?pi< z;7!Z&ll@eN3Kt$lZg`JXs(k189=gt1Oh?WlB8o;SLC)O>GV(!?dt@+SAScz zDnW`v%v@JZp#6^`G35vHSuIxW{w>mQ!s2@wH>yeM!gqC9ZZ3lQON7DB_crq{lw?L# z8DO zI)!RY@NBB7t7t#c`Z7%)xBx7tDIz!arD3Ve@8dzOwaecT+Z(^(AWA zl|^Z%Gc}}L5kU}^;6_IO=(CUL(+2B)TKpKaC8wng&uBdR$e$&--Nks8QBI+^%}=tP zC+8;hH&b!D*Ga&ug=8Bx>Y3Ouwbk|IV63dwCnrF@ab-<%Z;oNdhMR85^=gt52I47_Thkq|4byhp(_QswX-F|p8w_A z$NNo1Mf|RE)tOOLhFBwaJ1saTJWmoBtBU01V3+@^F@sHN*^fS)5wF3>_E^n(zui(8BImc+ER8-0}_UVi9OMX`# zOxLfpOpV|18O!{O4rHJV;04u%##@1X+*tx<;0J{07#n z3W=0z_7I)T_{{r=HOeV-C!N7FypJ{1KDOjIhxV|fh-35J_4&F=?skvYJ3`v<^BJ10 zUp%aIG~9EF`I)q?#JR<72}H&hA9Z?%#b|e(l;;blOIC`r90^-iF>Cm3i242%-;7jq zqv9H7koT$YKk)JUlUwrK=sI-rr41uI-^RS__7hh<$soi2yQuTGeF)I;8Iy~~of@7W znJu16A*MX)7Q9yLGu29yuq3!>lI*r}+w>!Cn%<`6n$1+5BJsi+{bs>^Ix*@#gGP^< zUgjJD!p&_xP2-xn@-XeMsQ}S0hGp&i>;ZQAAHVhUhBMrE{nRnzaJOWC@xJ1^N`GLl zYI`P&N*?FLqc>@qVtvkH&2xg^dS+=<^4z+JMGsg#ND@!yyj7+ho1`7Nad!rPw3;O` z+|1R|sDSfo)Aol;oi3+d>eBSdNu7wvl#CxD5M-andbHAT{`ebTKZH0As3UG;qQ=jLBu(%PpqZEH)<_9rvSlsniL zW+y4nBv%mVhuyJ|>u$U>ai_tIR3PqQjSLenx98{7U!%XlUf@ev_TWFT=3=&ZhU+fG1? 
z!e=2kEWoiMo`QjFg%R|j$a4L)1!wz%PKsT@Nn_BWx&8W=PzCsDO=QU2kJ~MiY-s4c za%GOvRB*p`vn>ZPTcvWKNm_n>w=QGygf-35iw}2f(s<($Wn`9Ha!bXDP3n^G9ZI83 zy5F^mojaG^@1$L$sVmt?D-qfzxOHFVyvaAink1Tt>IUn0irYWTzhLv7qc?PS3263d z888)K$jHhU4A_R1So6=VIu8>!6?foK~=~?(7j?+#}5Jke%$nr4; zPw;eG^`%pGvRDEPs>69o;{xQYNnUuE1gciM`T1ug_?o+YilRdWHG7C;!tP-)lQurr z=p?XzuyRS*ZjgXSfJ;J0lS}e)i2Yk_Z_rhGV0~WeTNzrhp|vs&opKS!ytGzL427Uo zwg0uEXIUQI)QTMUl$43jYI+K_tVUA^cDC82s*7g#?nUe3@wy)%+;_Kf4)*&H`+GwO zcZK-J`!>IWHM@71c^-Tb(w9&ac@&nbHCW?k7>u9TvQo=R+^-SdDyYRQCvy3VA=98P ze-3X~vlxP0-Pj_>tT1PL;7pCCwPPh;&AXNAYm2k59r-s!R$Q99a=V6EIHZUt*N7&a zxNhD1?U+bj>ngCPTl*{XrF%B{I-=4XQGh7m`+V)%oO&O1&||Z{ni2n3ZQt`qwE6@_ zZmfD*&}r+&y?-0~J`k~ICzds=)mBvZeeHR9{byGL6*rd@iOLCKY9XUf2)r@I`8^_| zz0I=0r~4At*4S?0cpKa0%_3A7bLcpQQzBtH=lZADh`KJz7VR|Lrn>3F=ul~zgOf8&0i`-s4Fun-@~d!Z@EM12j79iE+Ov6`MqgP5B{4BePy1B7R; z&3(YJ=Vaw~ve+oiQ&f(3Y!zj1w$;7+m=@iF_{=r4s><(ojLqcUFUE!3N<8GZeCvUPsQKwI6M9?I@WvDl& zycMIgOGv1W0B6Sk1IzC{=EjrRBaP<9WmV?*AIxGGs#@Y z;s3FvaJNIZTC=U)>1x}lJAIeIXt6CrT5T@0IK^F;g$fCam8%)9b_;|UeVBV$CHz>3 z#cKFd`pS}2W&fwnAC26x;Jdqv{mVb3OmRkwo;U*q^2wSs5n74mExe=PgH(Pb6xX{h>;IH9ekm(SQ}h|Hy2sbkqSb`$ z!;{#}KJ(hP$3F{$<@SHz*nU?OP-t_cT@$|3-}R!?oUWbPeiGNve#GY)(HlF>gb9b< z$&*6+hMc#VySjO=G~S!1in|5^KDiICUy#W?=Acdb#_Srr`#f?5#sV&P`tpey{m#Yj zB=tAX**Xn})!CSlvsMO~^fK$T>pN5Ibed!2eGb&Ie!}Y_mi6RpG@+nS7w;9Zvl1zS z+6|k@@iJfenXA=s0`lJDecVagaN%`n@4ZZP3k+!zOZS?PZaXRaiS%rC(dQ?lWpk0| z{B_6!I+Jx;FLR%jFy0*066hS0HA$9o;*un5?ZsMJd=nJ-S%J?n@VkaraLL09ipGQE zk-Gl~)+YJ{ry4I5$MZG9nid0>{C;vK-$eV!}~m6u&_yPPOA$^Vp%T1i)} zXJnimk;mEhnDG8g?I*RxsQUWbr_~Ij>P6Oxrn7jc275l@y@RdKi2`aBQm?@HQ-w9EiPF6xxWv+gBhG&5I98}&BY+&y* zj;OVNTEkLon($4!M+q_dDP5OeI#^3!E#ul?Si^g_0+S6I*BXP{2;-F9BGYM&5I@=| zg}o0d*}p#7R(MH-yQ%s1RR~f=2xJ!4e^BXZSqcI#b+Cq8_O$0v(Qsdi+g3|1{|dc- zIR}3K=PBs@7Y^`Dy*J>QS_2x?e_q&rlziUDAVZRLQ!RP08h*tZe&sF^JZ0brw76vs z_?1(>&^&1`ETDNPNa1D$yqe}@I0Rm!!`<=<%4JxU25QuovnHy3`tMdq5)cQbAlCEP;A?4Xps}( zfA-b%js5m?tZ7TM#SrGy#0IyJ$3ou(t}j57 zGPA@($HqdG__^bSIyEABLdPasa4kc8Znbm}jH=ZHzaJR4+nlaD7vnm)2IQEsuU{rt 
zrDZzFEEo#OwLh;B?54LZSaDALWY}=R`0n)gAD*8QcPS9o(;5oc1kjeRFM`OU%YT*> zn%1>%w=@9=VExC(!H1K=pRM!ipdpOy^ozl&n*^Am6lybEt!MJHLg%=0HMToGN?{s| zm+p060>hTF_5lIqB~(CfJUuf84eKHrsj#*Fu~^|bydeErXhB?$Mb|={e9zU)@7>D> zlH2`I%oH*tOp~+DUeP=QX5d;YzCG{MPbM_f-Ltyl1ctj*@xb{uBHU#sdU-B+td9VF zZor&Zq`+RzLbI4QQ;^J4_jz&ICh0ok8qC+?7KLAe-FppbZgsp`o_p`iKH-Dqxpt%6^~TnE z`im3qDDRPK?*DxX_2{bqszwOHsgY0-wIX(1InAYhtW z(lMdT%Qx#6aAuXLEVTJ%wmkmp$UNQf5YH=@V`+ie?(p zA%%}0h!`HJ3_g71uR!gf$MBRNmW&m*)-@q9bg}P&lQDwKp_6@UUAW0S5gJA-2a-;r zfg5BKKXXD$n5tq?FW*1_5&p}&b|o>~=lX#B50^Wn`K=O+fgJ9cHxN?;5Yw=e0rjmd zyAmJ8uZN$)YbJ4Q-`oSn1TAbC$Yp_Jj}0uwUSRK8A_qzbT8@bv8VeSAs!NGBQTpyy z*x6t=Ca?prn;5VIQ{)Z~H**L!Q|kOz=Y{6w@L3TFv3pGc(8o-nOKyXTS1D;4*bX| zLYMR6P`Ny?n8BYb%-7?KLK!|>@kjzcs2z>+s>y4~Z1fE6DYE92TD zd|(f55fg=6s!H#VKSsU`o*%v(3+57!0fGe*+?oqU>hQMOe>`q~c@Z~LV|nf-^c-Fq zWMTiiUvq)sazU$t&kr`(|JARM9+e-7<^SJ%D1pHKX=(0{Q2+gRc&9+{NZA!U&r5OQ z4TeMjIXxhO@36)EQ(!WMraSirV|9A{V8fd$-xR=;}=^dRp zWU9Is{^&YVZmuv3S_;d59SkYD-tU*WYLy$Bx-G~T~|`PkF{OWyl)h?&REmJKKw zY+kX`b(zP^tNxF+WE?ZE|9lK7oe5Ab_(VYf0~W(}s6-QmIfqX6Umim`mjgZj-yFfQ z(@1g>gp?>s&1ENYc`gpA9LV2Gxe9>SD7aGeZhJ27uRq%1z&%IuG~I27`A~;|?~OaO z58#ciA3Bu({`k<3MNZe59=a%CAIyiI>%TvSBn!#v{_%;9RyhS{A|1QNH@DUmV2}S{ zs{^$=_;CL0bY-1$ybqQ@SH{HL{rW=l!$bS$ukeKbe2h$j{vUqtL!q;LRhe}7&mgP9 zpD}h>dpCtOd@tCtf2q9TE*1W>zA|(5Ov=T*6}p4n+AI4m2m6x?h<$f(RT6a*R5lbK zLz09(x(0RHC7(_e*bsO*tT3p@?o}G!hr*3mEW&Kq|~1cR_WiS zQ7Fb(8#BI@KkleVUdf$_%531}(*d9SnFKtlox?*y7|HqL*$*U5*x*QsqD z$9>9gfo1Lr{SZzr!WYoBULtjtS~>Skm@Y262#BUyj=buwi=YO-3by8NZbZ9iZ5=32`0 zXEA8h#tF&EJgK&)rbfSGcDxb$NTt56if25WBO<$Jqe8 zh)JxSOTpK2du^D5)Y>KYw!bU&76(?dV#)1Z3W|&BJcaf4?huV}`tNKn zS6z!6!NYHdZw^NlU7p)jW3D7%D(H_RjZe;=n3>CYt0o&<*cjZav6M142K zUR9#USCrLuRIqI0^Hr+)9+jr@x@keKCbHKwX{nE#d+!+X#`rx6Y7;uYqQ52ukn`*Z zgn3t!Lrzsc9S-6>6MBKTA*6+J)p1+#WTtPA;Z16>5+ngN9vZN4vx0f#ae#oKJwbT}ezY`nv$r zdBT=(gp9FtwLb>~@A(C?lg*P))=w|5znH$qFLiPK*39Fo^QUSGAIV|SexxrqvwJj5 zM)bpJSc#g_j@xS_+2(E-S+ME$=P%!QjE8O_FI2+@^(U2 z72hwVc0_VLc}JJj`dH7ciSP?Pf6kSBS&_Q3(+w={Ouw`A;**<{x$PF?uN=&|?{&HF 
z?d@OL-=}lmT5&%}JlNnm*fV#}+~15s>g1X5*QjyuqX3Euwlf{uOX)u01Kk6CKK(^!^kU_R2 zU$Yj_O>W7@xWD(TXnMYTF2I5D0fpcieOOUO3wmJRHe*;(P7ZqDxgF-PqP#0uCkxlL z=`GElqX)+A+@!a(zk;iIP+sEBW_s{;xA)?Ku$Zcw?Z+VY!HcZfTZ&Sa9UGWgzs_xD z^akEvQI^WMvVTWog5;UlgGXF>wQ9RNXMd7^>3=?!@A+lN4WAhYizz2VZsvAw&o{O5 zw7}uL3@@)Ob4=~85eIifm&Qxo%_rr*GiUBHjxj%3wEU*!=2nUE{HD$4AA7=L%MLUf zPAp+XQTFJ8qK;f)k8&j+Tou0~U#knx=Jyg|B;PV*n_0=?iMd3K9>~9_XS1{_Nm5-x38aRyg4v?Q(0A7^%$+<{mXI-j_OV_AtUv z=yjwwaiufw)}h^JppoKP4tP=4n1CrR}xAf$vrdLw*XYXZj8F>vFex}Ff&Beb^ z{Xx>}`jwWj(4^%U`YIKA%X98{7XlU;!iI*HC3|kB&Q31M_cSEcDbg>Q4U$<=3{8kL zBxT+WwiyCZym>t7wqPtg#oxG~8rv24O-6gFY5kiBKy)tkeDB z{g!zyc~k=$zfc#=>;ci&#>?&w{SA6OgG$CA4(M|-!=+oowuYBc*MLyXxdG(D`xvpbt^4FmpG^&ItP z|IX^dL8Q2Rw1^KS(sjSqh<%b($>re&1)NLapx~hv`3kq<6 zrL@olovj=`e6(aZ%1hP|S*g@I1X92*TMm1>B2G)J78I>@ZLyLH%Z)l>6H0q&X%l*wtkx`c(B3;D)Ep@mt{Yn@J!o4_|fMiv8>zcU7G6E zn%>ZSKt=d-ab0_KaR3<1_lFN%YDV&^8tX_dS53@qcr63VOtmf9IUNg*fuy$p5xY{lSgAFM9^xxwP zYS{%2AF*jMLU6*)ZT%|9@?^DFeI-iB+@WvhW`x4Gl~@N>Wl^nNg48rcw6WTB2@%Vg zM9nG*#VT|~1p`>`R(+r2)?>0`2Px1Jd!Wawf8|eeCHUeRuDiID-*7MY#;haGRtB5o zcM&4KT&mT&XomQKfDf)H{xNs&-N`&^d0ym;?Xg5HdU5r=SFpEN?c?#|DX^=!@c@Z6 z9l9RWwxu-BMR4iP>^!#%CHKx6Y<>)&L$R}0T`LQRx8A0Bo1Z`9>fdv5Ii1{@XtQMI z!)&oWVr#M9Qy9+AbL$X#w=oh-DRqm5sr3yuWQJf2U&tKIl%=>r0WaL00ml4QQ&jsM z9tI&}qxsiearQ;_Hvbw+mhguoC^kfEv25E4pNepJ_|gk^FqnR9s+c{*K}A-B_^ zp7ziTO24qk{rf|1FGb!$E_&HyXefEAkmtROP7VIna?F4P4U*Wl9}W(y6vtCkv02)vYu>4xZNn|YuTdvo5@ zRT1ZorGH$dHX9yMA_^q$XyidhVrFzKQlF1^E<{j9j0W%I1P2p6LQ@1y6>Bk}Pl1(w zJh$ie60BzuEjn64MzRmZEa^#oQ{WM!|C`<+ z3>IXTA6z_jp!pE3a&Y|YnG`C%@F_EK_G>qNXW$S?tiWI$k}=x%cN>=tyH6+S=aO zC{YY9l4L%HM#olf5>HvVVcmQG)GU_)YMwmFKnuvdDvzqRtJ)%A-2BL45)g zrV#eQ4yghlfP&fYD=+yDJFUwMVK($=5#WlwSM)0EMCfw57JEFB*gtQZQOvI}F{B-I zdXc32FIIxYdvI`K{uK5kunOl|Ior$~)C~0G1nMs)%$dQj%J`foja~nu4DG&q61VodfPdE$3&`TY|X^GK)TB z9DXOO&d)5lGVbR}R1)KMrPr%#yG7rEP0sjXcALHhjvh2xv6n@zcp23Yo@MF@ucU7L zn-ru{TJEH#Dty?8M4q33)~0%9s#u3vaZHg01e6g9g6~L(TdBAUBPQE%6Nu1K*&p(? 
ze$vnHvVU%I!tS$Ia06cOin0|`mZrf8^9`m|(fV{)UU=j)V;t=+ad}wK-c#7&+8xLF zM-BC!gvkS)FpSoKdrkj!^IL@|H+oRliJo?|pBqjC$lEoW_i@I$!m`1SQOgkr{r4so zYsyHI@F4gvYHZZ7#I&v|zpkNe|6xv{%B+hTy?3}+wOF^TOs)3Wse;VcvhMak3TYzRRI-9t6g&E$)C^}f@HDl@6E0XxDK@{zlw`?M>Q^e#AvVPt#=%x%yq76d9%J(cjjGK(qpF6cu25o{w`Iw^TnU zajtP^B6tOR`6iAWY~yS3f#63jFUB4AsQvHKxx!bYyjwBLFR;FGV$j-J~3O%i}UeK7RU`>0pY^rAkIVZaKkDIw&l_K(9UK z;HZ7@_|`;$OOCx3OgetKDR=4g#k46a^Zkrp0aU&cwMl_!xf(QKT)`OqF82);)Mj<Izdor&{jXF# zIf&<9a#?H>Qa57`6tPhd+-SbY=ohV>p31EO$;KOqC~pq3%UM#K*;fC&91?s}qkvWl5C|V@XYnZ#a{th#HGD6&+4AyU!`0P zR|cY(hTE`BqN!*Vd43>uzHL^E_rYV~ztJ;o>HpYKHvksQgoO^plszZIAj{_$hHczx zvg<;mXXCTK{hQDqG7-?9GLe5Yml}HZ2ehUTntMrM!W&($$*MCk+>;@vK;0O0P$`+1NMfXm3#gFilQ2Ptvg^{zps6y23}|(oQHrCzMv8 zrkIGRAtuJQ8v`R+0;t;Se=>9M`INZ~Xleg2!G>NM4!Ld!KjE|nxaZS#{8a&01bR>S z2Z)=N<(38Np4p|JPAQ7E)2V@fg5V_3vgO)8*@k;-s6u_TcK8sur1p-PJ=+_=VjBDJ z<3Qtn#xCVt6Xa^8>yJ9Zvs8F&=nclcXInz4YjwsAnqE*xg?WBvM-pcnDw+fQOzt4r zawXC0oE_xINse@tGn;U4iyO!XWuM!3lLt{;IE)oJ^+uS;*AR%>bCwh~XAywtJy~xo zZbc4zN4D=x%Xg6wixCFM@P8;=9z-QpVZ%AdRbM#c3jY8&k9Vgk*wdGpENMw^oboS# zSu`p<{$ZAc)V(=#d#2oqeAd|T;*emIT|w@2Yv%-XB5!5NdL{#0obx>e5gtMnRzG*L zR!aW(DYhG3-Xa9q7<=w>o^3@}z?o?a4ZEu%UM*LOqHELCWX@RyTjp=mf*j(4rR8h? 
zmCi5pGU?e`14>J?y{9<6XrS>dahEUiL`7WvhnjG3*W0jz`Xsf2q>@kHd#9CIfDe)3 zOEYDx;r&!q!c=z9g)3HE>>S)6!o@+}ofh#lEesrWa-u_j696oXoF8}0>xh5i&$6Bl zdRr`Jy4D!6wt@tlts}qa7RqK#ky<*OEo)MEGV zsurd)9Yn0+`F%C$OvCc5Vc^~8GnKovAZJkQzO?eUZ{tA59IaTQ6f z-+7AAVW9Z2eZkZA$a&}rB?Yy|12jJMEl@<0Glj?*#truWC1;xn{Q9Bd!55?x2;`%q zOZmg+(0>GWK3)?pte6`rs26vk80CfzOH0I8xLu%+{n6gSQUlWV?ZlzVPAB~vi`3Od@Ct(O|8}aNA?s8 zfqT|)=JYDad7Q)Xn-iyQK;sr(Fhk9MIplp~{%auOY0L?a6*&t;#D#Yf6P-nl?x}Wl z!07B@!F&E&$dR#J=?$3$glB{bpMdV<_3pE(FxF>etJK35z76R=7bR9qQv{!|4^P81 zvuX1TUM)eIsJrUl)op<8)DH%`8zV+C z3fo8=rk|fv1!#IT?EOy*St$onX$q$98+~e>o``3QM~7`;ukU26pk4N>OW!Z{WKOw^hJWe$L{rUl8 z3TP>u0ywr7a74wJV5Z)vYLuZcR_P;alOC zyB|_!uqYwXs0nx2P7N2=_{t$qJ@pTSgcPl@oG_`(n%Q`A&ViKn`)(LVu5fVD{YxPN z#~;ZWzUh#EAp1Rwa?e<*A^i`5Oq74oPdtRhO_KsSmMx$nQ>#}VA1#C)?kNb#KC2&( zCFKUOErG@NWjz7HN=wk7rzn$j2q0zOg7*f>{9WVw89;<)Dv4?zVPk09Gnr@B0xFu~ zWy1d?w8xHdv-5HyqTkt&kl=>Ne@s=j9&9i;>_%-`3mPAx1R<7!=}WLp4^%ycqqO5Y zpC?!jI2>N@kO(xfgRVI~B=SGP^1}r!6d)gce>PxDT(Cg=7M}IHaP23SH5w3_jykZP zyWUk35t%yA-W!n#$V1xIItADSF)i(#zn0w|e3%nK+~m>~k~0d*FEk=(=UpRGs^=7M zR2f~8a2AOB%GmRL3?>vb&XyP4Fir!kPtv}ddl%5D{0>SE{Z>hfiIOzrQs z8=LWg0yTyHWkW;qmw!($t(l3EBme3a8~XDRZCn)qDpm}EL@Smr)= zoCYfclBP84@|l9ev>&2*XCmK2Z_*(xPt!T*?9FEeo7`Bw6x0UF>z%;=`NF}81zOnn zCuoY!DXKx!vT)(7%iK-e=8L*YUW9Gcy}hS2>$1{UD)DA5lO%m+h&@STYKP*Y)LF{A zok7AR!v1{U=>Ggb$5l4~PxZtmHe)CLimuqQ&N-g;h!?Z3YDpkro<;Kep3+NPfj$9- zBaK>*@rtC!#fHK+*8bS+CUpP!qXi?a@`^qDOs#KUGl!mZP1Vy&nb_Xf(u z5%mC@GFIBqq5z{3&H%%>i{?{jJD_$5dqC3{uM13nD%-67yHql9b_r)c&N%3EwAr!(pqGKaiw^W2)H2Cq0SHM`c^l0|XXjvW)l)hcUhou{#Z!`10mHk%nKR;JBw=u-I z(Ab6}qN4RYdD&8Nb59&-oLEeN!079bjX^kNLsO{W{XXCrXbri^W&|wH0_iN?6qE+4 zjSRX^bDcTCY?Q2Ep$``ZB4$l(DN3F`R^i^CRX_g+o97lA2nwUL524D%b8I1gxe?y> zAC@|{=yfv|rCUO2X?4Zc<%?lomI~s<^>G)~MFzRh+>pXyk3Pu_Jp%}OmEAyzV_?Cv zjE?tKQB-?W6})^6D*>&_-ZgEv`cKBph`@^ar!cZKSDjzT`&NW|I~#~2XoBtWvgMi$ z`6dWW_Tl;3f*XQq$Zj5eF9id}X%SO}8df_G!mk%EzHngNxGw%cUS6&8&ZB2Y zfaar;CAO>}an_a8f|6!3~vaBC!kVeCG}A*^?hgv+{)$S7z>uhG(}0 
zL~=##E~p>>!E})`*!MUo2t(O&mMWH}X8?K$*#nK&s6g4ZKs{@V3RZgY&kFgdZCvx2 zxM7vUTV}g&>MObSFL>f`)TkKl@PepNLB5rh7iZo0x*uC~qW}DzWxSsZ_x%l&K)rX3 zbo*o3yOqVp0Ulve+q|H#I8p4$$CvMIm^fUMmo{o#ktXKijMum~d+IuTS;sb!EWrhc zrX?j5llKzl0&!9wXI$3#;flFxAdACr68);-U1Ri}v{5|s|wDek9tYrz zYs_!R5q_7pBz=@U$@+`Vp?ixzt{j1Yj1us0So)v_IL}~Ia1857+1BvTVTl>auF>Ibp!U#%kCXa+9$zeN zIJdg$`9b><9B3*YI(rFkRUT)pD(!|u0 z>NKRFf%+8?R}~7^jE_HyIJ)qAxI37{>0JQ39eI&9L$pdq z>jRXcZVzaRh^b=88sKf03|XL#-o7Kj3j&pv1HPV9%zT1CF1l@)mPq*iBTw7>Fo&5N zzzzn>M6*Nvr;mtPt@6VpS?<5H$UB>Uj-gXd(d_*|UFygdfCtconFFvpTIa${HovJC zef0*Ak1vW&|3S#C7#`^?WZJwK7eK_dAt%+p1vVq{i4OU*5usy($m%ef#vbPKma>J^ zHb(Z4dB;y|OhhXafX z9D^HC;hsdKNCDZ*uxoU*18u=yW?}&qGO<@?4xzZprQwdF>ljutq(cM8WM|>l)XC{R%GkN!iMW-K+Fqlj;C9jK{>* zf9*aVZ85{=xNK8~o*ui;M>KJ@gvnz1?q7K>>H1%FR!|lvtkWS1epTo5cEsuTy!TU? zcBe$#KXP|J=!yE&pvzbf;}%q0Xw){uf$?ElO;qeE>c&$p^9t|S1{c;jhU!}_uG~Qj zVKnL6KWfaC`rk%VnZo@g7xEXNxnMLkcHaW-eF1XW6vpe}hh&`Dc|`DD|2C|I&h6f< zvj2bdHNV?%H%=-=j5`#03VLi~#SpuRN;Cootx1DH%|&fK?Y zJ;UPQ^4p?dInT?0?2KHHiXqns4n!YAX3&GvKU(&1CQZJY3T9tn~|6Kv`eM=6Mwl)wj zWbF8NV&kM*C-A9^z}ef4HIZv30I{4Vz|bRpoH8*!TExeJ7Gpy}lF{?GnA>7ny%ga_ zCGpr#c%lqsZwC4?XBHf!x5MSwT_J6I{`aYW6vcxMetP&;UdhS7#|>@K?m0E>cmNb= zs(M%7=cC(~h>roVC1U=GAJ_vrT-0*ySW*&ps3#Xt4NKr}R4;M#$b~IMu54lar2G>l z8yl+=cEH*iFV2-o!sv<}sf>C2^l1;-`JsPYvoZan%KQDYOeWg$e?6SQH?RmN3dwPM zYD4$kT@*0LbE~CIy-#&>5l31u?^jcn%ZQkN5+9b$K&WBKVMkoJ{Ak2-RGvw+40ER> zRs8mJs9`rhULg0!=Me<~4Hs>}!QJROgi2=vp%Fn&8S1$iR;}RPOUU#>VoFax(%qBT zrkiCMoaw+%k8LlGWNOzh-`*pSTAx`^@&jbb8^WUDI<;Y>Y|CV1_@nS2l(|4U^EFwTW`x>Mro2j>ZAU|_vLgAYG7vnG` zCI(`s4Xs8xu`*It%@S_cZ-$vstp+8x8U9^+kBd`@kfBoK#mHNcv*v{det(<|ti@7O z1=G((x1VOtVwKnW#5dJ|;@V9{Iu2(vNNVY?)Um5 z;g?ZW(Fp-VQM!zbcZK7jd#D4|A~9nCrfeCFZL{q1jsPx%Ix$n3S)TCmss zUOLIK5s*(Qg;XV=;~^Z=p3QPQA@tUdzmJxiKCmmxXyTX1gv*89gpa%6#RFG(Qm+H{ zFcyPcpiX&gDwRmH+SU&&t%7te)x$r(qls4ti^icb+Od}&m|OMO+$ha@6eMo~nBCI? 
zdnCBjaTZ?T`pdN&2BqYu*1Kh4^M-tq=^Cr^6AXTCn{iTAiCmFYdE=Posk#U|yaUykw)H@DOD?Yw>^+?N&a3(29c}x# zmNwEJf@Uz{R|ah)_>f=gB^Lp# z9?sx3L|=_&Hr#~zPl22QYa{UT&{rVM7}_^0u)xy0(4kxPJe#e3`a)att{WjS>pL^6 z8wW;fuXhmUJK-!ky2n}zm5Wn!%B9xLOL|bYzPM8@1MO#D&Q$%R%5b7GCKX1furs-$ zRnVEtWN9~ts-ShYL*FO0^ulkIwrN0d0I97BA89lg&MU96d?^#lzkYnP6u;+GY)^rX z!8fSDUOhh3M^Ynyc-khcw;Vr@hBjjjq5nPr9`Ycl_zjJ1pKa-7Rz2vXY{>gS+a~st z>dXc%?n%o3FI9qSBctgPK@bv3an8|NRg>!9ZR9!v@7uA_O~qG%9+hoj>7!_-_YBu7 zgE-|3EhR`}@+tbt*^IR@@yJYsQT;cA!?45OBw&?~KJrl|V@1{pf8QiX!ukQqOh;_Q z~aPjAY`Vi()5fq7y5Lb?8UOv$8W+ltWeoCTcxImUqP4SNE zuqMlI@ZyZyVRv#f=d&D~?>7?{)lpXqE%9mPw6F?r!w6mC+D$Nzsgu<+e!wzr4i2w2 z5lXJc&{gZL>c%lZrjJPoTUn~^$dB#Rgl^RiDyg95SVWW^1(K}RYE6kLQ4Q{+lW(6d z)g{-W!#bMOFIgd=5azulymEL#-8|g}4pGqEr!?*@oi`R=(`d--3L9gMENRr`hdzLa z@!k!~T8(IFzuQA#ptqdGSbu8Uw6F=?^&7?3D#A8L1ypZ+R>M<4R&3o|6*bnKYaO$Y zF?tr=r{+|15dww_=>9dd#zBN*u9Qn5%jmP{`9tfl^yTzdTG>dr&(jRC-)Q7{&;fjDd(UIy>Urz+wWp-Ol`b2<3yG{mV}WQTnSCE97W%F-~?b2HOkZV%T!I0u4 zvPRrYkVCFLUp@NT^YTO*ux9VTT=Un>MRyzC=NbQnfz>a{T1Qmy(7`Ko4D9)&p6#e5 zbU|cguke-S)U4`#Zmqo*tlWWeJ(DFDELDui(Y_ZID^(?=Giy{+sF~vYoo?oOBtl+# zuUUPzq*GA!^)Hu3>Uv$fdPW0dm`hd-tFQI_c8cV?dS)$dLO;4fZ>j3Arg4YIJ@2cI zE_gb2EuDoF zVydzEUlgB>xtK$wr&F~wg>B~ucJ2h+NeW9@5IgR9-k(vb!=jZZ|C2( zOWBtkNyp5Y=5@A)E!4I?t1E*ylMNw~skln?&6-pzvNsNBeg?+xofu36tgHxrtqOzeN+T zZt{KGN&ip9cLFlJwItr`>7iTBEoxbsgASkVppZIwcc0Jeb@yMP0f?O6s zayhl&wW{I@-7>~`io&LKG5>8_RJHI6?S2jbP#91+Y0UP{xkdjB8>euBc4x`B;LkQ~ z^s{{ZgHfhh#;8^zCuR;c1np~Ra%x3J&Q`iFPLNGko>pafb99Q1nY^-i_8z`^+j#a2 zj+#~bV2ZQ(*}{g5t!PrrERlN({fL-Cx<)mWS;7FCL4r$^B~q0wUUX~nqGa&kb_apt z7);J~j75RnG^RM(e{iLMOU<0f3O8778sLYSXqj}V(=_Uq>p(Xa)=x1&kEv}e&m0M4c%#V%}u^_*gj}~8Em{s zoW+slY>|D#O()lalHY)#Gbk6Iienn(sV!JX+y z?x(U~qZ&ahPCSf-J4)(}=6_)4Z>fr`c~M)S4K*M@hrqp8G%$UU$gVM7H>ecj#~KE2xG zTLWf%5$&y)Ho>`g-EHH>WTCw>O~1%9ogd%wsF5{6u3Uf`7cu)r(1T#T)e zG6BC9NjM%}&^1l3UH<27Bf-DC4{dv^Z#Go{X{f&qg?V0Jt8OJZm8O;vO)H~yCLT=% z0=zVaUT|SaTe0g_qBNc29N_mU>gaBvwEhtb)IkC1qlx7n^kxoySoSFwN|U3kkqDCT 
zeQ~sbYx1Bm3h<1hZ`m3VUXWGpV5(%D#BCMC0YAtp;#|Po)k-yfxgkXpPxQX(Q!wOm zVoN}MrdWZjc%N;O4e*2I3TYA( zL}0B0oWqDfmgOUcGUmGsujM1uW`jbmTHpsh+{5cH7Su(#Zy3NK5kq4=9;S*T>YaI4 z?x6fEy5-P)KB|h$spVcLOqD2xpj_2DxBB4Vu3f1ycM0jO0~btLh~dW~t`Up(Q1@5e z3DNmtTU25dffCaCMimDva-nKasYj>2Uqk{p`wlXbIxxF$_~v`W%*6BQYiZ{_qhc#c zMopBJH)wotQ%%E$$4AFZ`+pdGsifyop^W#}d_K75L=_OrhH19*(z|lv$TEhHs~S97$Q9HmAkOtD z@WATNNk=ib>K7zxfR$Dl=%vHQN4};e$hH59epYs=Ow)ppmgC~A;f7Ik=2O7qYQ*5t zdo}CnWzb8s@?$VQo71+msi{)X*a;odGEf7{MF+pl37p5@+(|N0ORdE>ysqH|INy}o zW=fKn=3=jFY^n)p7_`6}QiN|g%N>+S)|Fa=u?4oW`(mIFWv?;2R^XCfF9~i4N}hn;h!d2I0YzTGkP}bcH7sGn3w$fQx5&$?4$SBOT$O+F{BpRO zxt%H8)ztHt-%0dy#%o=#9@c+;u~T36K@N}(UQ4|KFNz)owD=d;S_=s5Z@`!K#dRqH zSJW(s&n605Ho1rVex%<^NVFJqEV3KZohH#G4nahxp19{7bs8z(QJdu>hB}y6n7C zdaUChhHr)iRvNRTPZuoAGBH+J*(ph~-)L-;`LnT(Dova3VU=(y0!Z1?7J21{``cGl zpjD|1Ao--qlxc824=ZwS@NsB!@u`g*o-zy zu6?YTb&qP$7TuZGSnN?)#st|RZJ2p@=G(bd;fehEG5!1))B@mSeu-(Srl}d+_3>gv zyF*#&4PYkg?o6$;Yxi{ZZ`51ROq=-I{9O{Rhhr*w{J!{+bBEbh+$Q}ckSlq0hwRSehUp24ug zoosr#u)yCLVxnVxiap9bFgoDBs!PgeF$A8eUv=rD!ux++h=m<4D9IMoOO&|k&e9Rv zLcX7*45mhQOO#eYg4#5FlcER6)cH_mm~3-%o^ps^8uD;(Z|Dc4#;S4gFe}YIhK@NM zMW4ki--z-~R~?O{^%x!vxZ{d8!g{nrCVqB=+_U>RGtmDCSJHJSU|6&GWb-cUK2)Yx zb7nSZ$riN=-!G}H-}^d*Q&btBTLqg#OQ^J6!mW8Y%jmu;$00$G%5BuzC-Pj*Rr)cC zv+v3-wO29W0=EYNH`oK8!0ZjjmLoK~ZNvqn8&*$kRp%QeOWru@m+;pfcks8r+Zww9|3z8Y zt!2WZ6qBXO!6~{j_U2Jy($?8uRd;)B`wQ8m968hOQG6k)fZfdcvnW98J|&|5I&&dwGAB2$I=l@S>aY0&Mo`@VQ^jJ4Nh`VItR z)5z`U=Kf9X5e2TaoG8mTQ?+*r8 zTBTDy)~wS_2jsbG_RAr@X=%Kax8%~U*w&@azG(Tq_#mGsA!u9W%B0X>LVu0Gk{5Rj z%a1Oo>)5)@(Z&uIw?yfa{A#9)h_*>{nF6ax7!QlTTAx-6++}R&_}Fs%;bYsn==aMl z@*TVkiC=c!2k^S&zwrc}VQ69V&Js&AKX`SreEwD1(J%B;;`pf63cNW;932%A8JJ;I zK{v_FZ!Ike;Sawmkd#GFijMrIA-9p&`Q#=LP(du%?_N(8^#GArUb5ZkNF_(k$13+r zg-WE=cX&BeO#7=y@JGvBUb^M@Q))igy+ShALg(NTTB8!rIkb@)W!Z>$o*w1=4Hn=u zcB!7JET~)8Ax&W|y;Smz$obW@E=qdU>$Y)G?)nFWd(k|t%-^Jm=1FWS)hC4c2CJb> z*y7hyF2tVut<^X2x5xd$uJSKUQ(2q*P(U`Cx#?<8l1L3E+vP~QHgq@bOg4x%xWn@O zM(SUcs6{jCH0(sbp0nQ16?&z02@zfak=Jxg-L)52QQcwbS?s` 
z&LH=<-Q&0H5UdWL@?Yktt^&G;mN@4QUaf)|!|pszheOvs3u}>US5eWi(_o7j1TNZ& z(8s~E!%x*i1ntUNhF5Y`C5t0r&jnef81KcXqQy|$du?4Q?e>zGcU9hATe(?ZZ>je+kmQ!b54Vw zY=;tTB4|jt27f5QL1iyzc<&-DlU!L1UbP?edU(*OyZZt|`!uX3AkZz%IIeZ($&1^Q z;(}F*3x7Rp{P9z@Op!f=T;=w2)M6Veud~etQTdSQXKT!b1Cq(*`-DgmPqdPO3}NmW zZ>X&DNH6uCfeexAx+L4M=TMnn2B#jajlTUaBac+V3q5tvl-KQ;Y|tJ^0mwmX-( z%}bT$1penr?0iF)o!h$g4+B+Bf9xBZ4Unjgy!KK8Y5Q&8y`I|3#yxvclWsFDhQyuo zP0i{a42bLhys;XvVV!?uT1hn6G%7ldBloxsFfB^=KMN8IaLYDDJdPRxD91j$ zOoTD3+GCZ}GaHe(Zz2NvJ(XWxFWG7&=jUg5qR1XeKb-y8J9x1o?pcocnY&R-Y@&0P z88DcDvb&$)6e(G{wuRtSvfyI7CWg`WkD1cYyiunUq``MU%J_aDz|cG0#>N?5#>2g( zq*LDzUwby?-FHb(oCF+gu8e29R~h*LMn&opX0n)5_O0v8;JqlzN;*&s1P4xCAKY)^ zQi*c-NVopUQ2xU_ZWC2O49XXK7X5q^_lk{kVUJrI=iIZEQ!(TEA$C6mjs=N}# ztOiNjMKr=Jati;pY*Mkt$haeTZG}FR3Bi7A(d$3_;T6!cE1iEnVXrM6@O8Cpk2eWt zMnWnG65lDNe58258pn7levcO(dPXe&dRcZ8O8!T!#MeJDxayrtCs>#{;8P>yfTDm< zXeIeqQW`_7vfhDcAUohw)jrdU!KV(Y)L>~^gtUCqW=R%Ihc&L;%so-v#92;=hR8LCkpzEIE{ zrtiA-9Nyy2zg)htaZI}j`tvX^PrH5cG!ZaPYA^dFeY>@s(9xtDLH4!9(cp#b7=KpU zVz;Yz`d$}dx2qqldy#ujNF(>jx)h~;&?_vzg_ec9dzEb-o4SZbTEz_t=@dWZ@Qhvb zE|Y%WUlYAR5u!=Z=oZF03*4Zoi*g4R!9J11HP@8}5cb{T=|g9S8h?3QpEZbhp17(+ z1nF+7k?VZ$=(oFHhMPt`o6Ll1{mt>9Q`Pad5rcLNa31haa#L6xrEo}>|FH6QKrUTQ zE3%tRE6zkYvPFncqia}wCcj(+4o6&{7FC`uJ z^~WsXaKa?S@wvqwz|{zL!NL#26{B$?mp5r!r-%8(nGR2kh2KP_Ev2i)?9UP&Q{!1B zfO_8A+wUEgT2HdfJ)_g`edLjKOHpPZxy{PM+%@zeRAx_Xww@k2Ghc`+Rg1Y~JVvQV zbB7mgvRWJJRUlc`0pI%WIMEi3p{8exA7z6XZrJHZi{rdgMD+;NhS0XYu3YgE!mubz z++5zih_OJ@RJAdQ>&!DxFCJpEs%Cn?m69Nz;rX~Q%CHle{x}Z?&aDS~nA4A1(7wo4 zR(wcii8o&ToeFna&l-Of>~kzkvxdoU{*W;quVBX;SlXJq zrrh-?XeW5x@T{E1u%i`*nVIv`lXpOw(AS}vAdiSVn26nD5u=r7r0%?}ayPsp?zDh8 z2S2N)&k({&!?9P>;pM;atWUx-cMBU~tS@<9dT9j-&kUf<9!Tgdj?#=9egG}=I^71= zbI>1Ghd(3Yy&G{hX+kdC36N<4G&$w4AaY-qX8equ{ z$G+xE{~eh>-FXPq8{FkoVrDqzVA&|rh)k7i5bbqZ zRSkw+T!`&;mPH9@7iSVdKb8-V4tfwJyFqW&uJHxA`{~zmsrIzMc{t0L_` z6r7Q3f#?NfG5#KV7zF>8Hy2dBojesUZaiQD4d^W!XH0J~ zeEvX1tEvP`--Nq#ttQ!4L{?}J&DN1so-#SP;9YtD?UzrC0X-S=_E!Ze&y7m4UtAa= 
zkHS6oZ14tV)C>~2_^)5UU?tA283Hf?G7HEQDx8H7#ySQr1D<&J-8xp+TpBBxKZ_?b zg!K^ef7pBLu&UZ^eON(I8tD)aq@=q`O1irnq>*xw5{p*plvs49bSd54us}e%q#J&d z_uXf|zOm1@J?A?6I_LWKA9CSbley;e+|L-}9`_i}obhAU@0e^kU*k>WyAH;#>mCM^ zFRiNZFH&X*8EOtE0f8+6m9XiIBfc7RiIFVe$&~00u*INVotqGg(1~c_gMAtz#N4N1>j_^D16; zW9%YQCu=@lN9<#vAT|P2)M2C6r}St}Xf$LobS@JM$N3;n?j2?gwm1fD7#&VyB%PXe zrh^}f51|dhT#?PP05ZrR!8&Au%A9~hTSTUln~k(qiHe0iPcp_Y`C}ybwSbQY@B$4@8|fQyoCKwu!A;|Pt%@PI_Ji_ zd_)%*b1N*~Y7AwiaP&lbg7U$}lntFcW09FU!|=W&{ys~Tv>mP#zJogLsNKQ41yhkW zH_dOw6aE0F~*{uk%=OtY&Yqe0LGr zvZ=YGzxSR#%sYQg(QE6ivw@vg6UTMw0N&K``KEB8zP9!CNt}YsbtB42r(wZ6a^ug< zebGa+U?~SROrikG^i~B~tat^T=pkL*Vf+x0Gx>^M}a}|fl%u< zzVApUs3H-=hSV&GGG~P;kgh2k6jH)SvokuDoO2(#EFX9Ql6=IE;j-7Z-nq?8ixpos zL(@qh0{CGE zb$CbdK+Wa6G-yR^t8w5%XO%4>Q6(bXKtlJTgLUz8_5z+#b2|owfGJ zDBYw;*_4eMnQDd``Q;3Pj5zF3zX_{_G{>_U3%gkS$FjQ6I2EQbb&cRR z=Y|Rz2x{k^xmvHhfJUi4RT>cmvNoe(Y9*T-+I!0F1X8aqF0TY#sTCG#(MIu8&ik9o~ zx25meh7@EJlhb5*h>y%+DdO#bnxm+5_eDVmcxAMWQ%_m2=beRWY0 zV@~HRaf3GA!|2@@h7>`&Q#sQkp9ABZ6N40CgUQe%`VvGHB2BczoJgxd{5h;`_&`f zgQ<7B8nT99Ue(|> z9=6%V=wo&Grv)Pr^^)D(ifg7!+a0zZ_J9!0)cSDH%7N zv2a-i+lC7y!q;^1c>qOF%HE6D=fy3qm9W`Xxf=VzLcLDscU$kToT6husNtFsf_=6U z#^{&4mDv0}i6Biyqs~~_P5FsUb>WKlhV**T4230_(E7S6aFz$Q3FcSw|H__%uWraUYjp1j}mnFqmPf;9lw}vLKEjF2sC= zD8;9~ORR!{w52&(RZ`n_@1ZLhT3WB(l7ezD@{?mUw+H8^-Bw?5>M3YFjt)=QTM>HP zH9xQOpl+ZIaN5k-B>B`tETNVf&CUs;P=%?FhSXzPV=CP&k=dc>Jyjokk)o;}K~O%K zv4}mrPqg`RC^91yQA^Mlhk&z0>pd!evo487!yL=Eb?r~G4ndpm-jO#eH23&kKnhwBWXU043}%y`W4@*Go6CO?!p1 zD)wT8T0VmDWrcC&uBu`Co`R0y$FPv{t~oIq2bP(;C>~4wp=wCy>S_P;;T4pKb@+nO zK2Z>OQS|MuW^Ze+a?XPcv!3`zQ0xHA`wu7;Q>qw_lVlU#VNfWhJPkg0pn21jR(Nxe zP7Tc2TfQ*_Wuv+ld!D#f;NNHRVyie)6LNjrSJ5_*28oQ-htKVL^sw(mO;HzWAgus- zhA>9b3xXo5xxfRytf`b!J`V~(XjUollgwOd0-lx{V z>wyHrRAxQY=K?<1R)WazZu0XY=Pz$&jJB$Mm_W%+8aIO8V-w_dg4>i!8?WzwWOR7? 
z5tqVIKh)ZeYN8ADnBK|=pc(eWh!$Kx-k2hlcODSw_K^Qu1ND1%EKH~tfA~7!W1g=S zRoA66e^kfgmXEiew;snVZ8`6(Y61EBAI^|+Ggf7POnsCenA{a>?m=q?Q;^XAK#i5q zPx7Psprx5(b*0p57JchIGArR*_!pIa0fzR)BtDYsxchrL%47as;PJlIT}?uG=T#nN zW!5BwLNgBdC_|w^P{Rd|GCrJ9zMKWN%GL$K)#|ttmdw%k_LHw5yfrN+iEluPN(5#; z2rcgJDygzuJlLIPLd-2eWXLe^YnpknOr4t*0vM&wE#?dNvHHF1{g{`^Xx6tJZqf zELKL|OzPd#T+bYyz>@b>col`^)k72$0mFA0t@c6zt)(6NyVE9*r!qOs4%4A2n;`Y> zRfMQ;IrK?kTV`x8X>ScApkoaDv0$_wSRPFx$5P~h;PHD!4`iax*%ztrq zys?)wH}w3%ti>%|A%+cI06#6(Odo}JTW^dmPniLC(^V#w+t0@L>&01#qwab*eql@g zjR{lt>bRO}+HO_A(1I-yU@QtLx?P#E!Um|jbgf*VJ{%3|lG1z;D8Cg*W`iCSmY63s z_CT$Y+Hb#f{yhPgJZBAEUmp_Cju;D0$ZNIq+%KiuE=}qx>IFv}P3j^%KsX$uDsf)a zr)JF6T6H(4Hf%_s@uR9m{Wh-RKwT@P{iylH>zy>`u;z)Zf<@l()~%+nfgxQ5=TT=E z561$=N5(U@$>S)}yGr<4-Hv@(EUzOg8FPKMU5xRi5x$E`&e#qrdq~X4jUD+(1VTf_ zvmU2hELW!Z5|tC?7r%8<(Y^PInzY_oV^VNl7G;=-%*vA80%>~MPt+Cht+>mJ#iu9p zi(K}I(jq6TYR}x?VF&MwXzH%Uxn)5lT-^C0J+tiQPl~cRUU5cliy}<-D@P{hdOy7k z<;ya`bvRTyj4stRbhes*pe8TO691iu=an$`3`Wl|j!OppHt;~K`n=Hw zo#i?>gHG+vMyaMCX{RQhx>-Js>_cCkKf@Di5zSmGriP8ambvQ@oQV&&ZGUln((096 z0O*-g$f!p8yAKPmP7eqTv@GbEn_1(LVJpCXQ+rhbhWAj|n_2rRy0G{Icd z&fc19cs-I)Z)YBNwbK?*UXt~Cgu}Y!1TVYD$s2_(kFM@(!yJRoNM;V5IIqsw#Q54B znutAl%G!S90>P;6sjw@DNcxYvk-ME0m11zW?$L+j&E^x9?cI?PqnUmb%>kVDPf(2@ z?EsfE8>RIu(fRL5rOTC9s9ueNW5Rou6i3o2?5)!#6 zY--w;kEEh;x%ZU}A$Mnez@S0gwhfqWI98*LH@wC{sPuuKu!~e^ugly`uZTQ^scT|M zSGW;ND^FbZQY=1*+P}RS^adV#dU5J~JJ{^FD$D<3<0}*8u4T~`Rpb=98hHQqO`P*V z3^ij_&tOkraM8iDm~Tzfn}JpO#hu=!5E&Yy-UY6Aec=&Jy@NygL~O_jqssVo_$;AC zk6nBzDVS31-CPT;lqlB8S+(2Ys~AwsFe?4q}sMSVsfV_o4yHhm%Iaqkfi5o~da zHBD~)_}g@m=ZDX!b_Z1-$dsyUoHJ5Q(H6?ow0I0NX;!OfuXy9Fa!!RWq|q0O-*NQP z*CGgZgB7iHMLxA_G$RyKp_PvOO;5Q!?`S&xx4>FUcxfM^fUM6jQF%p9wFQd2nN-Mx zmt&s92(Os(=A}P$jt8`|pzE>G5Iu?}I4q;0dooLljMVug0Bj5sC8}TF)=~!KpO1DX zNVi^S!^6~v&!DU{Hb7XOVzyL&2GUr}=lQ%G+Je)?Xpb9cAi-a^rO)VvE%?<)kG!mD zn3JDM+C8f4VRJcV19!9Dced-Hjzi)?-hz7)00#?$Nm*d7!8~MxB7-9vQ^N!b&7Khw zDQr@C%pjB9692&>rrRu@PS}LMi0W}8_XHV_W5mkE=WjDul+yYXXXMy!uQauFYi==ozX@ccBny( 
z4G}W=3a4~^(PK3v6@AK5x0KJ&3e5)9oK0@=1*%5umTM-w<#Lx`yCmDPZjFT zZk*z`ySe)OM`pow;%V@qoDTzI;`d!(MBt!>q|g%v#TR>}&emqy7HIoypBBiXIffDk zhjT;gQ}4b7?F?}zZ%b^8R`Dx9IKU^)ew&<}%H9aVms!=5c*G%giehl&_1hfrh^=B> zbpB_vLYKx+^2c7ccA~?rGFU(&u+nx>o2Q_M)gyf^?|*|D*RpeL%h zBn)yPjcD=#-ikplsJlru(gKR$y^pkp9YBu{t)wJ`1nioXQnVUQMUpb94HH`E%Cj97 zrXz*)8X~LQOfOzZAu)h@Ww7~!-Ukz@lGe1~jbeAiH;G{%GvJRx-crUqR*kj+DwV3A zd^$WX5%Sk5HO}aJ=JOcPWo7JA=&y>|+uEF%9rGSWQDhSq5(g!6F_vnn-cBpRe%a}W zjH4*T1b_HSSxksX;pm+{$$+$@FG@ih|3vgjp*f5Haez^|E%oZAuMzRHG&i}I<);Az zMm#f-9l(ZwIAR5M>ooS2+BrobE(02moI=;!0(_CvRn;g_=4KVzDQ^fhog^+6YSR`f zicU4v^OS%sQAsA?X(s0|2KNjI&Eh0#ip9z$7_3j#B|>lwBXH!bxmM zo6o+4H@yv@>p*0Iua7k(xuFe%O#^Yzcje;e-b~CAFHAAzm^{*mQwk@QfxrKvFnX(? zF!o5N-riEZP7*%n)$5h&0 zd=_>CFCsVWY9=MdM#Sfqn8o)|hfO0Jvast4iEy-DQtu@dC@JH{QzwVGWf@A&=|?|% zcSLqFu9_j~@IbYgDrZa<#ox}gSXFLG_|1lo;=Ge&(8vBUH;pHo zC9dlXRh#mbET^k#y>q>2+Ecb_rSg)BD;q4bS@a(*5?*aeIaw!39*fH^zRPdR6VZCyxX#HCy> zb?F1C5(&c!N*_>Sc)409ez7n|UPUwp#3L5G3e{Vy=52NfcoFT$FX-gv9^IE8?S$PD zyK}3N#EkM6-ncs|U;!1v@pg&5F`SlqmmqxWmqjn{0VRrhk9JQbPW1VtGE)1)@6TTF zk6WKh@7aG+Z8@E+*Y?CFd+%(WTomKY2FQ>S)}D0-k={1U$FoA;DvvuAn!FO3qXqk& z?i>Nmy;MSpG8was&gXxlME+4QjDj7y4F<>`mK;xhDJms3~DR)yDB=hXx zh)iml58xHMlto!>CNyM0DeBsMwwTdvZ@|nQcEhqz9OH)d?CS}i@1McnwVg(REoC0X zonBtMRpL|F^^*NFS!V3r>cs2Ww%Ps>O!H2ox{_YPpcV~}S82}s?R}2AZ`1BJ-(>3C}2LYu83bOqh;qv-2%)D*EOsIYKfkb&pCS(&>Jg z+q{hCz0_A(^3lkLV*SER3Yvyfyx&SAy{RN|lOrYTT!I^c7cdH$4oe3k#Z<3np|B}v z%6TekBzO4tjGZ4eC1jx`ZZyHa3)kE+I1Fzs1xXkPyKP0THCdNpkBr8e&|idcon}q5 zOf*v~Csxr#jz5j=c@-v)*h+9e&@kfdXY#jJafM}UQ=i>FnOcTam8R&H_Dl&q+p2AB zF3FfQdqGf=od4?CAXVT)pFu2~Bd+~) zXsuSJs1pRxB0H-1AheF-tmZ-|KHEpg2Va`(zJ5Pr>k<9M&ewB8_x;4?e!k&ZTB;H0 zTNkwt{ucrpVGkC_QZ3`cwqTZ}@a{W|v56h}A+E2-;W(cFAIyYD4 zD=DP%oSt_h@J4%hL@CW&unr7Hk7NXad5JHVn4UcT%4A*4ipO1&V(6$_&HlhnQ7RZ| zt(rOuMJ1i({iCS|(%-W$;_&nX#1HlrpBLry`5&1Dt~GHLUL^wiTNC|3zVDCuhV;!s z+&^RrA{-3+{w4Sa6JDjKdILk{Pole3DEhCvglZ z6F&S%R=ZaV`3WX@S?beipGlAEDM4lB50p_8jYdF|Ll+1fahPtza#BG5y6Fr2Kn8s< 
z?@SA5-|r(QcEp0XWmN#gp@>%-=r~n7)epS05&8@-!tSE*gYE6o*?X?mpR4SQWL569gQ67m*#s{>I)q<_P{uTc zmgadZ)}!^${4ys%_2wnB6bukyUcr#3FBf|ya0|jC(pKb>3n8`|zJXt6QqYEreUSSl zfCTo_O6NTrU@qXik!4;*lfbrMyg^z%FitBSdF6$iXui{##$eecDzp>*N~y!Ar3Y_7 z^eq$q!xOS^YhTNXkRSMdTl>rtJ+lH6g|*IxsRI-!zk4qQkD<0kO8Ts9qse%qz)!fx zg=g?s_q$bj@|6N9!43)9r41+Qm)qbuBUe4JjuRr$d8EYpPyLO(oqX`?Z$RLfYIyi~5Fp+e<%X4RnBF2&IGir*6k zO?TCoq^h1(wgWB6+I-4=`X0flKr>5X)^$z-jC;Mp=uI7nu~q2XkaY4)WAf)x^cRKQ zOhXL#=a4F|n)))iOZU`Prm6JLRfHs2*YNcR5-@==!;ywVd|T!i_D|y9Yr5`v?zwb4 z2VJY^bbK-PJkIe!9qhW~jPGHZA{$4PIPh|Z95&ptDcjlM_CTK0Uzi?-!A6Mh@hLqW7pidVlw(~yb%DJ1$$x5w93)y-um&|lrbc$u z_j5qbrSJ2sFET_H`qob;(9lpd-10#A@Z{{_zmQs(rT%jIM}ELw`j+=%)*5>_8=L-NKJ2{)gJ zBg@cCyHcMc3#5%=a(81&2Ts)v|*WcDV6U?R>T4i&9bG~0EbeF_|l+! zKLY|kFxq4d#W@7HbEKVMu_tvxw~QHo@JLMo;c$yRcu@j15c?o|yGG2+8PFh-%QLzP zt&v*A%qkhW>?3ep%~z+i4FHyr#^^jq&`MfShueoQdWPG-C@!4MB9rMTBwM6bK z8$0p$D46P}IpgoUgfDS7Bw=}D#0m)>ZNaraE5ZiH?E6L%WH<+b_kqUAvDsvi;Kw!y zo7*pFcCbzu1~qUOWQq=xkv@D0PBw0ng|mJDK=s< zVJAB3Svq}KwD1+p&1edD6je!BVFYHsrM(i&{E8Mf<31{t=Jg6f6APcdNs$ETL8XotL!L1;1KD!1Ek98I zBrRgLLf)F8I7qa$N}epGk08dd(r6aru}LcvR+eA9n6tsw0Ogmaq(eSFoFhsq_o}Qo zJfS5Cn9ME(g+8Tu%1iy)uc$PE=rzI>yh3~fur$(uRt57-?@Lcrrxp{iq0%Okj1(N@ z7p?lXOn|Y)(=C~I@?TFj-(Gs(BuCY`u6gsXSbttAky#Typs%xlMS3sI#F!M92RD=<_cGBZTa#fwj9vANY6qMO=H^E#8L>H zMm%McF}`5KcXt_vharpmTE&Hj)d9~+tk#|!7h4jx-|&6h;DZsktN)&V``r1|%(~D< zMML<^sQ7alzVoD(7!kHOyYl$#x77)4UEPl|0Ze(hoMS=`^<`X1cyiGSb28Wg6#P`1 z<%#vU6U|);J#LoZOU&lS=hjSWV?-UbIgg@E8eO_;sU8gZJAX3W4X!E$+A6p5zV66l zNhKArC!JEMjiYqX7eLE8Kp&9+iBI!;?2rV6ISoab5F=`(X$5fRZS5?96gb`cL?F-}YM;X~&XB zD#YRde+!_oM%)X=AhNiR5^=TfiSX@#B(s>%3MVgj@LRL2=tG&623qqUr|VO$PR56d(6;+1I8|7U$P^f-q0JJ$NThZ`5BF;a}Tpmc8CiV>eF9+Xr=CdL=nz4;yWC9S4LYzIMr0O38yJ zZ&BvkPbg5kBW>2&X1beKg;fj@FD&fo3}U*c!kRnSarII$4TSisN6p#0z|ussV^Ib} zOVrkAn@Jb?dTMv`E_bQvXt6$MtXQS_OnXFLNaiplJvFR)>pvZk$l^{Zmy$QAyXkVw zG^>z%fuB#@j$<|&%S@3P7k?+h~@ zXETo3q_RMJGWHUqy82NhyfCUw+Ve{D*WVIC=JYs-AgTtMjDVtO2M zU)L~f-QQt;s_qF{oy9va&SJyc&HLHjy!l)gIT#mKavybJ8Jp=pH@6M_QoHC&3$eD7+c 
zW0{z`5^iMu=(Nh$wasH92rc%0<5ABe)6mV3?Q$V_zE%F#K+{l>jUyndGQZGN5c$|N zX2%b{24Zz{ODQx*P^!Rk*$JD|Z$|*N)di?ndo{fg5OoCM_3RlXn+m})gEeavas@_7 z2h&eW%kPdV7tA?7=N>_&$BfmFF1yye4~*884{RuZT7F1Q3Gt!&?p#1VGtbu2Yjy zWG%13hB0X^{wqo3J+{`5*2xqxaAR-x$ffpY%kg`xtEMvXwb9--j2xiJ@TprBK0~Eh zK?s#?sKz*n8ql!Qu_j~Ev>s7ty_e=W-Sm8wB9&QL!EMaDu>bn**E&QcUFmNBd4b=A!^NAHgtekxZ&6PN z?VuItHKnodw+Z#+uU?Z)`8OA>3R89a6&VwJxKt-PmRZH`vnHFNqZm~;T$}7dVZnW- z*{%Se0jq&i>`+5!jQ5H%?8kDZF!Dp^o^v9OI~eCOT;za!4CaMkgST9o2BBAi>B7#K z`%+E&$Ge-t#~0@rcD8W!Pd+hPzedNhVnShK()FKfiC@^H#xclCjrXUqRFi0KcqA2D zKF5HSKG}XjS{EG1HLIyeLD$HtE$pNy*nS14DgCgh0;h^18)&%A4n^)%_3s}OItoD? z?#G$XajrHicr<%liQ%6ux<$`JwR>rL8a(hyz6F$%9v-)@olr#9(C0re3w8@3ts0_5 zhi9@bJB^jfAZ6EWKN9Zu{?rh8(YJwYA|_6s2kPSOU-tCny` z53#{uKi^0T-S!cM>oSc*i$<{{HgvyAjl_=4gmn;P`&6g*Yw&o0Z(<2`(77*0qDZAj ziCpxp{efKs?=(&#aNla77!o$Q30k99wJGZf8uV40G&AHB850QOO}ZpT$FG4D!{`_X z4l`T$Yv(EWbGiWw^l7eu^~h$-8bu(mbAqihE!VZU03xg|;!tNnEr%P7QO7aFr2Fli z^M=;C)R3btcS<3_s))gUsDsugK?WM@Y_S9xro1njm<2DD4um(F)Bz@SBI? zk`fQd#{$n4^smT{38|S^oA#A(d^okOc2f(Q2zMDf0v@-pe}$HG6WH${Vhf8wL@iJu zqEkY!$15+mAml+5!uy|d8M9Z%QW27IE#I%X^(p7tQLD)zxA!0@3VS~0ZbyI!*vf?A zjw^>rMhYA5C6I_)XN#xAtrMk;#(E&@DryMKD~RM9t!mOmJS3~e9s0ZjY%g*rm8g*o zH(@k-CAX7M2Z9W}DZZo;u@Pa;Ck#GHl_;@=HeO|2NI;==f z2`^-$g4aIwlhog{t;}o)YqXnJM_^pwwbV6<*u`%GlISgz(`$~Bnqme0d$Ev0UA)#) z=WLaE1}wzMQC?xgU8{yM`H@Cd6X_9B4?dz2P}50e;fs{orun$=F?t|v zN_%gAnW*J+rIx||uB2Y)--TqEYzMSZPDidN*X+kF1XZ}Tsb&+wL@qZUqcn+k39lsJ zfU+oEi+D?gO+^f8v(tOtAjJR`$r!QFXhI5t<&F=h#FL;v&V((o?6>0Q<88zCDNGmENk1qq)5*LXN1Q$G zC%?%jqQi5*3wa8I`C8d;&R)MuR-Hl$lU)Dpac4r&ySf3BF2H*jU2oDA%%)_br{qHg zNS@?&DsYEIXt4>2TPl*ZH=*lAcX=(N=T%cj>~yd%9)1P08}gZQwg4rH;xV%N3Y#RY z*(*J!l{BNOg-O;R;cxV%=$eLoJn)LI^_z7cQ!>6KUCfp3P8Pc_ zW=N3TVxovp#ylmIYm67PV69uPfbkxey93c>GCQ`Yt^69o@LB3g6|WZu(HQE>1hIid zL3n6~HM7()gkvMg)rK_Uavx=%ljLhL6*rsqEi#hLIDreW$Ht6S1&WL%7WhI5E52=` z{ScD!CD4#tD@^)hHbn$h{}`WKa}b-<8$^%G0+T63zL5Tr&CcgwB%D9|w&5Yr5c|+6h6jOp~ znBFc!Oizdx;vVRH5`lO3rMn;3IbGzn*n{>tG&W<|$8Ao)9cHUmKppJBM;*5OLR5(I 
zs{$G;@tcgRdtW=Q?xoQS-dlq6zDFIia*d^N5KCXGpy$u&C34qXa5w{G9=%cp_5BIL zCMOgQjq`DZEBl_LEH5x7;mFUcA!b{eC5*oB58~ZsZxnQ;$TFBHSrCoMo{^D&ke!+M zR^PQ4&*2eg(DUIib{Rh&|0a=dW+h`{31#C`v*L>EG1Qm?dMQ)mPy@C$p<^6ffcVjn%5#GcJ`e?#oU?V zj6fc4Og|CZ$fpTE0XsjSgK<;MzusJ0qd3Rj(~)(hdhar`VqfsGRpF`o!=mzVdce-n zyNc7KT;OE81r&)=UB)mSsbBDG*~1iMVfw9HRT9*pP@3d?x8Ovat`$4*TF!^h)eGN_ zzJJ;|W8T#Shf_{l7%TTg4XI+WL9+msIkSYo6z}Fu=(7C$sa3D1A=kTS{0R-g!nuN$ zRvddCGqOirE5gC+R8Ky0_+U#lB!BMtE+e||RC4}>i0Ilo;nz};Ms$yL8=iLO)e}LA zU6pTB%iN`3Khqqg<-yjJ7b0ivChO1i`kIWf7Z+i1V>a{U!4&b9tIDCX=~EG}XFZ zI9*BgLr-TuzJ;D1OsbqPo6o`QC|tGg3~Y(zeBeH;w!MLx5{33doAF z(l<~w!JYQZ>#m?-oT~5dP1WDj)N<#!9-hlT@dh5(RV>eV=x{h#wOc&nA(Xn%vw$^J zf4-WYL^5PIH{0OuRQs-Kp4^S&CKQE}At_b2YQe_KB_YmczHhCuA38eK;5g`>xb3G&t7}Xx2_|+ z$0sb5Ua14sam-~GP6CXx4K0kg$@2wku$(2|`=j2}om;asdD|@{QE})Wq2VU$^$&LF zM>czvcsCciem{q%gz2YUtp)4em>}-qH-2*+e03P?={|kc23*R61yr+s6uEF+{~hYy zKUlzl>3mq!p+7c#^P&2RM||M3!-9o7JjW?PDu1$Iz_cERH*!T|pWv6*rxtou+^}(> za^?mC_Kxr!u$K)ig}7I%nti~JfLYdC->4(-|Ks2X6vs`%FkQ8Ypl-^E{-F5 z<%e7stPg!pw2vXC0th6NTOhqpApXJ7j?^T6en-9f&Z6`kE7$oR_p8Cx%KAp@CYS2C z{>k0d8;Gm-F0n2HPovA-;Gz4U_3B&Qc5GAHJ$44%t*`ebZ*GewcNcUftXzQsK3-Z$ z(DiC!5qxFW+&>pF+q?SyW1NlN956@oMYq88FQwoD|4j<**csTVuDf1tTYf)!0@qyo z;(V_8!T7`29n(Yj;gfQZpM_(FL(|tzqMqvd9Oj$imDJE!!M5@;z3<)*^@nc58^)bi za6rGmCgWNE&cNCYneTidXdd;J&Ym^`Cpf^|7aJSm1H}qj~u9v-6BF{M)4$-o<8InlEif_A)cRWtn zu7=KSPj}YzDQ+Cc>t1j!cwY?{Zov*Rp6%0qzuhhW01UI_^X;KEiQaelPTs4Pgu~2^ z2T(l@FSwf%$#?ob-i?-?p$ne3`|ifB3tl(C8);QKmpCK&Wl3%s~Qx>@kObs-7_ zHSg~`3I;XSIV5;%TDVTH-r{r=4e4EaJE9%C4PQXaX99OB@KO&L(fFF7?*&ubuk~)d zegS82?FJ`S@B3`;o0nG}jn=xkaW7N!mc1RLocM}yj~3?0p*n1WZFUTa-e)eCjGl*R z3-kO<&T&VlxNlxGPc@`*R^#hi>kR>OjO6CH=HL<1EKo7J_VN5PxAq$Bg5cHVX$j~; z;gIuGka1!ANXnD>roeASU+{T-dNzSFxS;lx zc(5Fio4`A6=@MW`LpV?QTnCMsMC@!;1l%_5s_F#FMqfrcXfm$vxDvXC6c276@OrP( zf-Y@VbB=)5sf9JEUoFhxH|jWDgx{#UigPY_9uf;Qk6&Gi)>B zWP#k?!r|rMZQso9texIcyh$we|#7Oe&nz< zF|r&;vY85;@*ju)b{^l*^B?~H*ALr-*U;r{p>XHd;A7AA6=<4zl%BjB>&8x|Yt+2_ 
z1=GfI%mjK|&V~LFNblHmWKwd0(lhnQcw|trSxz)aTQI=;;Csfxu_!W)zxqFUe-A&) z6MOZ!{&76vi2Fi0e;eBmH;(jpX#$NQmG4Sn1M7F=zdL8iNA8!o_~CpdQXI0jB9a<@ z(3xWAUyK(v`OgzWCmLj|wkOae0GgWpi(5c*`t!Y(p_AHPV=Z^L(((Oqn*T90R+@qd zy>1GmtL4NhLA8Ysv5gqEUV!6!FNAZ@9W_4`ATq}trwPAN{pEy5B{XgQrq|y9id;>J zYt0J`ek>Rms52RWe)1ipR4s6quYjKge>(^c=Xrd$Ht5=dxw7Xu{E@2uJe>?ve=&R~ zR0mfp&D)z6kS3a&Gw1qoCTG;VoEzuHbtYPrfZi)lt2(|CXHB#V7LqgPk!r~Ws_Xb8 z$B`1u$4l{U^_asX2xla>AO7mg58DP@T?}l1JiG4RczpYH2(m%uX_dELY zcmLIw-Wi!pqsHeE( z{MA}ON*0-0(3)s&wkLM-(}SY8@o)5JR!7atx(RNyW`0oDT1aFVw9=jMYwCOGm3(9~ zaw;iEdHwNdOVl4&U%R4n^#=O$D1S11`!8OJM<69q4}%}DX%oKto1a*dfJtuJh}0M$ z{Q%yb+Y3khtMUEsyS#Sqh1ajv{Re|bN)~7uFy$}4ga5}B(W`nlLnop&2E2Dx{72mH zR>B|cUzp~n$L)l=e>72ka}7Q$CqCM=9omw#qijq*vK_gUB%$wjDgORMExZZeHK!S-KVe)H$|HGFDTzkb2q5L;^8J#Z9c;b$0^8drv|7|(U=}Zj%oNvD~`!aN@ zzX!EHMzFtfl4V2OZGynP0ht-VDb!v4{>194vwIJ0hymzhp`f$Dpmi`%C|wkPiNL zNdJeV5(w$~Kl1f|Dx?G1!T)$jmm}RJfoe>k>2uLj@IfMb}gPT^fC-A&jg^ zg1`Ix2*CgEV5i^e{98)?cN71G$m_2Is55Li{Hn?PmZjI!7Lt(sOF*ak{|5aZvp9Jj zAfP||cR>Grwt6R_-l>G9*Csz3(4+n(0sTigPVi^tIES@=8~Pn*{j+j(C)(0))&DGW zcLh?&6hQ9#BWeFtf}IWi4^{40(*6sN*9gM=IgN1r$cD@))$w|5$(h zMWV4~*t-0eSo)7@+|Q$ar^a3X?^ok;|13!Vv~c;;TH^l^H4g1(Rrg=iIFf&>aX-@4 zUws}>#{mre|9~3zkI4H!=5fEMacKW8wfsIg{iWFXe?^V^dC-4R|FplJARr4{ZAF$_is1KE5^7m=tI(#^W|QTm$>^~n%yXa zc(2HVbf^woO&>hs;OzFhD-v_vRg_%u|BA~0We)|=S7f}Y3p9xQljTjI3uU0(>&!Uz zSc%k3lM&nKhhJEsS02fxjq<-cs6@I;zZED5+!P3Cuwb|o-^BpYv+hn8!@cXY0ID>) ze>#>a{4ppLm`>Bbyy?Fx(Qpmh1PSiZZ!M6_F>FE9(L_q(=II)B-KvkgHd;qI&3U}p z`LA_zZZ-Fm^<$eDJhd;Hj*K_fMoI|pN+rb%OY*qey@3$nGnL=V7eKK+Yfu{ez5R!1 z81zdoPTW6TDq8xbhvO&4@;?t^yxULReob}Z?VKsUN|5e^2zctZU#>+t1@&Du2bF(3 z2%J{mdoc*CD?OkZ6MEN1b=PF^1NOMqxPLm92NYe&es1kL5iXUu_g9qqrP3)W4FVF9(%sT2-5}jv(kui70V$D^4h5t` zN$GDc_r7Pl_vY+-IQw_}-FrX((Y5AU%sJN_^L?K2jAy*_b&=go61?sCv|wLjbacT< zcU(;GsH;+TA+eS9;^Ar2!DbLxs4NY01-!0L@fiaYezT#1c@f}~)H;2silFzsJN5^V zr5onin^2xMYp}Vu8Rfq$nj@+Czpx<&Cv0Hb30*X^Xwti|}}wa43p z8Uc1uNgAZf+M?;+d|U;tzVtiXGV_m$z+9+yyn2|Zx<)b 
zD1g$Viv@lz24MbGi3b<1ERzbdnMp_=!@l46%~^SqQv=^r{nt8^uIqW&Q3CGiU58Q`uz0`Qjrzwq|9Z7vJe6+mgxb+0*aUY{E|%SV?b z%UnNye{kTEo^&{rH=;#K@{AM!tcH2-;7htp1OLVaF}^TJI#i@^)A)*pDTzZrR#ugCkXI@RfH``VLEQUqAnyGWaUgH~^DKS^l)N`V z-1`S0PTRRe=8|}LCDf=nE?b%S$25Lw4lu9-No$Uv;8B8ea#pTQ2vN+oHS^UzpMZr_^6m(#RaHbBn( zRb!31USnM~7C120JHH0;HozeF6Y=Y`d9b1kAZ~Fp#M^#|s^0dSs(y3Ce~xjWNcjuI zJ#L8j4;ka$e`Ab${~Y6gjkxK}5cmEeV?6jbiFN6wZIpJv4QW?|A^2yj`}#gGG5N)J zCdvQ(jj)^21aR~74?VMQtrP_?bxS(rSNnZOt+Ba`?1upU=5GK#{1b5Cp1!sTjS22{ zzqujcKP0B({Z@>>xvBhu3^Y;R{A`V1V`nEsv!G zc1Gg&S{qnk?|jyHLz~82(AVSNnkaW}*hKj{@pFj(Qq5Ns%|9>qM;F>PpP;k>b2R91;<9*U|H$d#a`~cjGE$-~s z2ldm;L&3vqRMrj3OGltJ&aR4(`KL#V)E>AMW3)c}Zm#No-^skZpOl{8{kBL6Cw*jL z#!a3~YGJ^e(S>0G?!92igc=q;kDd}dvJzGfwYYRhECvmQjA$DDH*;bRvih$Z;sfXr zfBgo_L*NVFy$AorgZOXFyJ1+Sf9&EP-@q&Tw#$FK2Oon@=3ACr^TYk_puc|Ox?^I~ zd}~xYwj=%SG{Jvy-7!0#VSz;&K$eRJ_gVq~9~?E%NT>KN^vD1H1E9I~iz=s6#R8mC zuLQ8`Ch%{^0}i-M3DVd3(>Cx`!u!^A)~WjA^^g(0?o{n^W-z?x{*C^*;xFyg%MCxz z6E*Zw+qo>#qKF=lg#HaX^;40ph=?a{qC}7ybs~ zJ3kTs>oE?(@6Y;-B9FXO1fcP(}+$A5$Cf;;FS{lI@v^j}Ckm>K?=UoVOy-jK9@>$rzpNBgfk=D$Haf0f?B zj#ezd^Y&k3{T~w;e*eKgl=uHM;8A}A@HamJ2iJN3q|*3RdjCDHlg57m_z#KctiXBw z^{ljO924IvWhYEl5ZLO9I9vz>w(n|GkiHOLb;ZhI{l0MhA6PB_ha(7JbOKB7-;eHZ z6*(Q~Jx9)auHFyOk{V*=s?OAhk5#GkfNcRsTRp8wj6 za-AMN>TFx3>gWbb#E z2D{xv2dZ_kfXSz^7&T>?>np39w>Ldye*XawXP3ypwnrez{Gzw~9&~V^U;fjL@F4!% zywiU@G5vnt=|6GETsQCZn^>jOnfc8k6R@W8eYOV2IIs;4aTxFW{QCn0uA71acE4X2 zkFQs%Zk+Uin%5hczwn2V%(D5`0FO5e4$ z?Q(9+b-`0AudOMspx)TX;r_w6xh-Fy#|xH29#$kn{G0{{{WERU;hxmzx+F{ zs9yn}{0TT%i})wz2>|fM8v(xbLjdmr&g;y72AGDlVYhY&JQkrrb~kaJ)$`%$SGm>7 zg2J}Vmu11NTVZ-ht<0W!r!Nj1)+bQGRuJGRP!eFPD+R@A{O{c*zdQv>^g!Y}r5$Vs z`5#s-|ED7eAgP4?VRVCSF#lX()dS9G@0-u%_gWj+3**=h`knsvi$CU$(l65xud5EP zZ#?i)=$_k|YF=IWry92$n6Li<@xsai-~nELc;?qP&i1G?ObF0dS+12=|FrS{LwU8P zOz-?=f?Jr-4{4ft|JE4S{U=Y4xMtqzubR5IGuQ_A)9-YRtL}e?!|jidU)^^}-)fA} zQNp>PPAXmPgl2dTseE(-$<6km>Eh}$(sK^ot+P8i;SdOxkNj5Q%Z218cI$w(2+;St z-bgFNWze2%7x&%+kklOF1=wt1Lv(F^ysoo(3mkW>#IDmu 
zuWK-Lih`S}R{ydV{SbVIH=tg(t7~~BGC1kgUn|627itZ%1AymzduQ~!M}wDH!vZg; z3M`)~AO)<{QR#Sk1}wnlDg$1%0})tSl^ndTDm8df)qUU?csJ^~h&J8uGOc2?&1#J= z2;E`b6xn6<-=XzPKCl}BuafBb{NV7vMI10k{Y3mP5C_)Ly%FO5KLqjlzkxVlj{1rC zUmy-FlY1k?H-8A?z`C0M_814uQ9lv?3&c}zi1^G8K^#~y`QJu-_b1}ld$oSet)tq;reOIIPlry)~|Qg%|`a~lB6r+=D(>g zGdi95$VYR=(HMZ&2i$r93>?20v9GBBMJa3qPK$ANyfa1<@NSAf7A_r zh9lCK<4i9QdwFGMTK!@7yv>%iAsNq+ona==k(;3&&(Yyk{ke?QmX{$qk;m-hXm zo$9^9?;c5K({lV`>@6Ig z7#lk}uzvfGi^bK-GEH?VbcPMbll6cseBMwqa{G2C0f^0UJ}4XcKtATv!_Mk(Y6Xt? zOOMKzUC|td-PEAIgu$j&mcgAJO4;4#7F4PUl#B2^OSme}iy|3Lzg&2|kj!{d+xMiH zF(U`hdT8!)DR@qEJ&xf{11Xy-twdnm;Vr}0NG!!))ZQ}P=}kx+>rn9QEErESVU|j~ zgW?Ax%g8v!Zh1(Imayvgr81N|R->M3_+{R$gH+Km6a6?#wDi2wu`%QVBUBuCC8V>F zSW1QJ?bk1qQ%Gfc&O|BuUWNrJAS`^sC^%dYkM%P%R<-uj?@nKtIpB5mXCQiRy`L!| zMp<@JV~hGyIdC^ld@+XUWi0c8bYi$s#UuBbklPa8ewUg)G}ef>M79!p_wlw}w$u1G zE*@~#^SWyS4`6}M2qm~9&<_SyWS zL522rCe_EYQe^S>n*8CT?|n?2Z?U;gxJ+F1QBUaTE94}doVJqod{u9XlP0vKBu z_MFHFS61cYZ*lS8_K$EkZz5WrW+}0i)*4yTSyo(7x0{?u^7blbe-1_Q*9Z%nQ96bH z$1NY8J`=eqRc+**uS7PKKBnVe0vE|4%aB;XmvFbCp}QS&#mvOEWO)B zR)L5jx0%!Ez!4-7;B~R@eeuToe1Dtl>io@u_r<%b{j1Bp%(jb>^SvaYt6ibX=cDL*#bogpHirh&ke>E-Dpzo*wouxMN-M5}oYw z>Y9^{tSL+8h7&mQ6Kn7LrX1^MFfuiV z70~b`PulBdJIL0w$Z(<7C*0iqB+>i(<;ap?-1DjeXFtL+_l!D9MDr`~!j~_1duNfH z7znmDIJopYDtqczYQm6J2@_iBBEEPx?khV#?Tf)fG;)sTz=;<>g$^0PBj3BTjZGb= z0grZX$WM)iRhT-J1d_6rPk=&fac~*(4qcCby+zYSn4VA)u`O%}KLM{#Ps1UV$OyI9 zEAG9+Imn!YbJ9KT&-|3De!VC7q;U=vC%EJ(I*^nLJE>wOG7qY9o^y+E+;#ep1jBIH6k*lzJ7)uu(TE+hVBg4S*xnYu z>xF(7_yM4b0NGU-w;{Fu3gkzu;>}Rvu1?S)mnZNcpEs5~m|TKy$G<`wKK;NXa-iTX zM>TxN&R?*pwT^_5Un<5Uqv3;q=dBRk*(LCZipzb%0_~me$-Op&T78Kp2k^5wdxsu9 z>xcw+aAnW9csK=f)?wGM#7Ser>S%t^{8F@Rvu>`iL5rICB^;-2l0?=5~N@7|hL zJIHR`ri^3?4~?V21UB@&!9aUFjx1K4fZ+y~S5GpWq;fBwMeV+wWgxV|zC(*ZN#RWs zoml-Axu$_A$>2z0#UYsd;~10Q@}@U*bOM4=QOt;IrpWUS9{Z}8#95{=$eNMU%MkWH zy+jpsLN(+FlaDAHx^Lt_u1~GnE|j$nCc+hXy`Xs$A0rwvIQwPu>6b}Cgtyqoq|4KM z;X(KH6>lp1jA3J=nV`+PC%gp;3;YnluT_Z~2`6Wcg{&Qdv*})Txz}i3P`8?XZehj_xzFt=R1%gN 
zd`D!N22wh?dXpuwL16n~E7p?&<8h+w+B<{E=ZvOQyWmT9>-9J;hU1lJ6%`>CQaW&u zl9~j?wKNg>ZNFzGFKUf3$h*uc*We2HiQ*`vACY0B%t0=<{Og zh48~pTy{&AN8uG#&DbNwwFR2vd{AJXW2dbd+y%yBvb#(%xd1WYXCv%ifpdzHt%NSjyTD0MW>F(3XV=se?(~ z;6xQ|ckHH-FSIA3LEti~mT#wT(cKd{u&&YlQ0WVX2!~5*b@(Jr!HIf)M38WzE27b} z36Vh^wmj7Lo@m9kygKTkC?sYdB~7|@KzPEQlpT{g|$TH5NNs*hc4qceNSx-hG zS?iAkSG`uWJu?0ExvtK8c4QXw`vb^aLPt1QVa&GV@eOyya43#IM_e{#8gk^DxY#}J zUF1%h0w_U_kGEQ-vpRH1?&24`r3yqD)U{__n7~ANs}|08n5DFmzAh7j;$G#FD@!pH zAUBCCQo;%?CHsPmK&59>ODh*Of$xNO4@9wBfg1nPZtAn5aLCIF{t?z*W(v!-- z34i%SiZsC!&UkOSr(IIZzH(XOcn^aVlNx=p7(Sw>H1^=GKk4%Og4eL}2v?N|SV0RI z)F@XmB6klvjB66cQJ;9s)|{vLoDhP@Y>o-m9ifjvAGjuVj;tlg4OxA|ThQ!}VkrzO zOYC>=_N2L6i;z&#TE- zJTwFXN>I6DM%Jh?Ut;+EoLQei>OM0=S4r}1WGbnWY+i`F>Ak7^{Ji|1c?tClSvBw{HYpOlE|bP)r4XU%1qect&ruepJL zi!i=5E8TIcGj~_dYP$c4cv{KglTdM_=M}|7vRbtUkwSgaJ8?a&%zyT#DN9$lD7Wj&G zn4o~>SW-U)%i|+ebRCm$^T_?v+2FB$;%WN9Of;j&c9&W1&I*KTXINOnZKTn)q57wW zE>M~xTbHTjEP7XciDlFkJ&v41sh0i3evYHWzUMx%h#Vw@u^{72k-#?EQGx>4(fO(@ zf??w%{9MJAF@MQz9&P96B07~1&mZ*!SO+vxN_};Jj%IEiNFK3ru#?wg+g+ZYkS>|J{fV_ZQYq0(#5Z+4XM?xuQ>7z*g0*a4B9ga!5pc0CnXf=sbw`bn z{0xacA}iBt?wbxarK}VOVqTa!F>Ga&IGg2y2&XdKsw%b~!#Dm?5G3OzG

}A3PKhx>p8>Nlv zdQ$FXr+tozIuU~RbqQ)&l0+c*kA3tydXV%Ucg1Dv&t3iU&!}ZH;}rFtdVgvr%1l!q zhF)c{Wah%##>hiOgXlWTW~2`o;fma;Z3j+hn-P-~JSYwM@Hm-m<5SDUTjC=Dlj(O-M@^DIW}h&Ybd)nuIair^DM_MwV)G+XAU9&s5m~ zK2jg+LA41pRD8^{ydioVkjYuJ1Sk)^0nTg{2(i4+hC7D-F7jAo`>)o${5)5mciKXdeVu)J`?6~WK_^6DzN8yA;lkjjbB zMy*#ro{_*O=q7jYG z$Q&1Pi!Vc^I|Z#3;ciW~sCwKS_~xWyYMNk{UrIz?K^ z*5Q%2KZ0mx4C7G~O7P;vX2|bfmRs72#k{@!kyaIkjn-B|KeahNuUXF}K*dw%KV&{EbZ|VP{~gMc$Vm#R33zCFgo_a$oD#cqg7e%R5S?{7gVz5%mz+C70bGz zX;crMwY~!gRD`9hKX^Pi2Mq`87)eG~$UsMP5>j*X)i?n-OXNv#>eDl;NYNliO6v}9 z-l~veRyCNRkYX}}kQkU2gp03>;-qbe`p9TOHlqelWY*hUAGL1cB=p`6pdJ3LA6=E} zl9#gFruJM842d(dR*l8-2ftpH@HO#@Hj_{u2nQ0^G*Hen3kQq@?M4buKk$H6geht< zdShRd803&>5}_y56P^j)W!Ak0(sKrU&IZaE zX5pC8Q2k(Xt5=Zpe&fdRK6@`y4H!EQ>7DM6deo;?+ymnFd_*}H)sYu3q~k_Q#9l>> z6aCCxZMFN&Cb@y-EIpcrZ!1oHKKPWABiz#_5x0b$su~;*-hpJC>o}yhXx+sy+D2%= z5s^jmgjN}68BFdNH)3Q^+Gwj{F~5B=3w8Y$b-Yf2bV%5GveYn_y-pH2aKvj`lwJ*_Q_p zcYP_S=hSNes!=GPw#i<-AZXPv@xBjWGy#Q^%M1dGa`+#0+O#%K;~i{@)$bL_xV-%u z8$K7IqPhLlh)*cLIlYzWB6I^!1HdF@G0GzEuBkx&vcS_RH z5v;A}bgFJIjMGhF4otAEuro3B&2%IbJUqDTB{W4Lg_j0NKhkCn1Sc(JIgmaF5o0aa zm-06#-nu~a1EpoPevI$oblngU)8cX0|9aQo_QiwC+Lupg0?UR*xHU*X7S$THcRl4(ZmkzK0GP+49H`-k0rA5d#7 zA+^~ZI06!~4JI!)g2aheCqPxK_xgN{(#LAZvu8~e>?Q85`}XPR#i)chvt*7H%u5H{ zF^oLZ+zhD(F@z>mmmxqWK9Zr-@ld zUu)V~7G~X976P`(cxM5^*y=?%!|V~gd}P%x322FPAmo<+{S-{kt2>8THeb`YXjW>N z9?0rkI!Fk0n>H*zCAAdv$asockp9kk>SPOlk@I26mnO%>C+4*IbfH8V)aCUo2&b(#xE7W*uX#4ca9Vzgk|DtgOa*1_WcA!eqx{ zEgn_+ihFb5UGs*HS<~AG8M~5P^#ORV*|nw z1^H(6*B0c$z*qsT+KVOrsEK6TNqnG%E z;>jGASLaNF_$FmsKTb=&Y0i5Z>ZJ2^^5KnPDGjK*DgyYX<{k0kn;MWRB+Jjn*$2mb z&)8L%pk}dfXtYbj$hAwLv*@uMi_x;n^Tn7{R5#jo_Wf8Q>U1p`jtu4TPXgDkF7P0AqY$Y}L3!N0;aHB&T_;GwMh{;v2H+ zXYC3nntKMTNlb5czX{i!s_Y<_(l$fj5ItMr#1^W~KIge#5E#7c{2r*(t8&lGF3I(@ z*2%u;IuU506MoUuGan=v`=l$qkaTYLpao2>IqnK=l|&(G05X$Y`e$r=vE3yu?U-eC za_3^HmtxU8uQBr8vO#EuL87j+G@>wS^2|x^+`|VhPolAzq$3`2R}swxKv*7{l@4)) zXd;THT&{z1$lS!OVK{dDcu(MjfGQWddupq-3^iu^$!kkB%w>?B^b7aeRWuQhJF2h} 
z*V+cRsOInog9#b$(x;iIxc5@YCk%s1Cwkhx5S)7?=9AYg%yIVA@v;4NjRUF|I@X$Cm5fQ%Tjc$E8ypcBXCnR&jDuakrjXeB~e((bbf z4t)4zlRs-=2bcR+)bq`zKFyXV6qDHQV-W zXR(70_!K>EC805_=v{RMT$Y1ZnK8(rI@RV!ZK*MMC5WzZ# z)9O_&-_p^DZ?=AvVtXnE%?Go7ixxscAj`rR!dMi(X_Q3D1F9GAATns#<|rHfQyx+9 z(uI?Nw&keM-JM`-;i(5IkHrNI92pZ2vNCG~{6n@+UJp1EHO1f=H|rd-e=OnC58w9j zP2#UsHTIBruiZRQKeF&)jzX$b59000G_faDyn5U61HISB1o3)%mdpBpSUmv+IcM=j zHpqld~eHn$QosejcAc3&-;*?xKJF~q)_hSS0yw@S(xjG!a(#BkgEJN z4J%hx_r^gY`tmE5&z7xkcrS@rqfA>EpJw9fIyPIU-2~nyaV34QMbfKr(3V9oxtbOP z4zY$CvEt=OvdBsPu;jcI`1;)Agz`sNgUNsZ&cBtyR{3@EtBOKMUqmAq=V)x#tO0>z z?bpnAeA}xrC)vgIt5liA84W&88^1`{37DkliRF?Ge4sC^Ng>KEJy-LNJwa65sa-6J zpg!fJx}^}=d_~^O63!4HvWcxzhqLGF@IcF8_t2w4rO91H5!?3mdL^h3-X_YYdlta< z_%Q7}+FO|Vk^f|V^$?nfq**mJ?-5*L*-7|(_;9cG zeQ#pgYX?pQ^qD|d$$2f5M%c})e0qVjqS{aoVJW{Z#hQV>NU&xl2Va@%;1`#K|^>%_qDRycFl8=1eMFMkG! zJ20OAHh{y#P*n@pR5oy`K6{9b`GnUb&ijnpJOj5p%wx=D;K7K-g#Ea`{!kF-%%_^K zED=+Y_9k?4k9j8R%Ng(sIEhpR4S5)R-AMY9r*J83myk3J;a918T(=YbCP3~ua+(ta za|Gpa&7u}0ca6lF^h9n5Xb8)ownD*m->C7*k-z#z6r3ELMoLOu)MA9_@W~ z626`=j5m|*br>-%n~>dT6|g92Z{-qYNQqTmQsP=aO0+0j4=4Ay5Gz*H$o69>ywgRk zFeXUEh{Ib9{04xhH+vM`38eS-d(cv2H=TI@5Fn!SrGaWg6;<3?>pH_R%dd%i(iV5# zGVI_N-kFSH{fxax5$c~$*hMCaDiutW^eTrXeqlo-nxz&FAfp1~yrP;;Q0!)8&H!5p zE&R38`1LSFGZpTpRH2*g=Ri`?#d3Ol3Tw4Cq5@MK4n@vK$zUj>vj~-WF@*E*C=FHg z;{X~Foh-MgMnDHDLLec~o;jkFPE-|#l+CE_jc$=FSv<~=#}iCtl-h}Sw>&>jiD<+c zQ(<3j)viM}QgeIQ-va1SDJNK3wv+gk4Ib&Iy`x3;Gh5|+c+382EJ^!Qzfs5NbRsYLB_ApBsf>BkfZ3$Q3~3irlPm z6u~~4UNfHo>=RACG&TX$V)-9zM~4N**LoPW3s0wMzzB!<#-myF)0O!-fpR=>7uMYYz*I)&C&ZZj;Q z^r$#*h%B((SvMch*O=out+}spV2I)@JNO}h0p+OFr=1+NkLn7M9m~I6IiVd_+HP}k z%EXpYzQN;hFnK~7Bo4~0j5f@^SdOl|Gt`>w*m!vbE00FDQSs?*VfiAcrrGQbfoBgU4!^O-K()NASHawSol-)p6g5FBnz z_A-!KisiI&$TW}mR5yrMALJywegh-xQjOEAIfW)_Tr)NfKV=RlB+DDCSC1R`f+&en zT!VcVgL?D=DJ!cvYaYSa5&oIN*(adCRyBvyP0BetA$l3Hv#i&eOZE1d^P+;llla$l zVvd7Z@Dtp{xFdD4!D2-CelJ7>KqKNW*K?&)Kte6lb19 zt*2b%oq&8zO>H=IkfIT_C>}bOfJkySUR~We6O1QNglnL-lfE}i(NnaMNGXil)*jED 
zJDiR?)T`6m3UKNmFsDv@dLb!CkcUxs;jtl-D=LO(o5r~&qLxqxj5kMka4(H$H6Pib zz+9oq0Y04by{Aar^V@3TudF0swz}V#)epZ1D5B>5L8li}kJ{exvVEF$l8wk%ZUz)C z7qxvpaQ)P+~e(a5*N@t>*QNL+?AwpK;?9SDm-X=yn^BqUOoe%H)z`+ zh7ufNj#4QVs6Ek@5@^&x;(PFP36MQr=S#U##lP>yh|B0`1$O>g4WIwp&TUzyT!8ld;m2u{au`qNVV#cg9QC7 z2yT25(-k2${xee6hZ^fT%J?q?2GvxbtFt>+9W@DoW;Sr{rqQrYQ*duXNYKXpyRA^4 zM+23s>{Zu0Sot0YfV#^%z68WTmH;Mmlnd$L?rgo~3xI zbNEOBNp>0SW0lxG!GdHEcF$l`2Gl7dxs@pwD{lJd6W`cJn;7Dn6dm-;;s~(RE&_Ij z`BLoM5z(I``Z)*3oBOhO#9!Xz%{3pdD? ztuECBBsnKsKiGu8d>aG@Mnj{HGS3&>Qqu)A_ZZi(fF0O!Z8I*&2Gk4GIvhvk{x++V&+qHBoJNnXbU9;4{Jrz+rz3s zso8)LRF(jw>e45XY;?^Yk6!j@5I)kFVpDEncqWMR$YuYP6Ud^ngF$(2BbFEq`=rCm zpePH(U(sJW6#|1}HtX~%@N(7`hd$DrJdAFPJZv;Dql*DSIw}*V)3RcQ#Baj-c8FS; zbb|8mtyH|EZ5|L7W#a?-)qA~R)436yXx*xMx3fedpe7It{WcJ}in^TWi_i)j1GyJ! zClf~>jvq)cU9Hc*2=}&jWxcmfUwCKv0B*nICbdJRjHrc7qQyTGAXs_ z_wPfSDeGgZW;@A-_dS?=89hX+C(ia<(cVSLijR?$WlkrO?cMaUQpRb|G$MDAq9Z+$ zEeR5%Ska{xBKz7qO>tk*n2p5b074bb<4zpa3BDNVXM4-*=9c0vUF0&}g-ppi^;vfy zgHHqDj@x)~$6PQJ7eu5y5lSCigxXKN^m$Mi>;rpN3j*R=g97e>KeYOgIT~X8{NK;$0;vDQ?)CQ!DuI+ z%9V9XqnhDENeMyY@C~``8=z3tUWp);u7W85VD_~h6e$=hkj+0e3fG(8@D6gNH-q23 zS|gh?w#6({5a&D?KkW*~pLG0i{JdtS06U zf`QwVm|3cX`9-%t)_3PMdb5pui@uO>r=pt2v!A4<2u^#F%R9}?w1g!cxk|Gow^Tvh z-fNT2gW){Qm1XWYqN$F(+E4hDP)A0{a!}y0cZ4GOn2*TSi)&KF4|Ox$B;S-X6S=JP zP+oMy5R2&u-skZVJcRxwMl|!n2jSyYZ-`gpqD+wr91=RImII&|d3ok-L3{@{i^A>B z1!Kc=^(65&EJg$yMlAe&zhM69XM9~tKD#VQCfbPa4T=(X+2P#0qVDj5su_BAVi(mt z2HOv_AAv^Xt?}w}!W!}OOB)ejRpDt=FYTYoy>O(9);MF6ts=m-dhcu~DXnR??ns7k z=p-aT?U6*@04k;^LKpq?NMo&qV!%PbbxiGE%!V4X=W(__aFY~1DRG^6;F&^X(Ro}S zM37P>grll)s*XA45@TO#AzB&E(T->dc_@PGv;i%ZQJdB3?{kVKy5V1b@6aOFV#(m6 z79D4i0)6QoB(CNH7ro!z!`@uAlc_QC&E|RYqO41t7F=PkR`hofY9}q_6o(V1@9&Q* zP#QE0DLLCDD!>bA8_JFn7?Y5Mz=xn?m89>9p{co|tTZ!}SQ7`nk9a-q54)ngG_gF< z-SxVT$Jp;Qpqe|{2Wg{!pG=c1PoYrItyUs>YRu=9rHzoWNJzp97DFkMGU4o*-y7V{ zx=Qr^BKX7dCVw7FH~gHf!&{2Np6KqdWAOR!b#2(=;l4|%;?J&mp(2$^}Mx+{FZ?NOa?+-OcpHv9& zL|8nWI~8>$RJU5S{i(66JZU^MWe^(ddkQ#KGjcd)QL22Eyn2(?`=0j?^F+pr 
zb_hSIUm3x@N2KxQ^uLJoi{B+TxH>Latn~4J+Wr(S-)6V;k*VKUSwXuqnUsdETVvP) z|Ci;8%7>K!q082OIQR}CNCGlb4XmWd#ruJQV?1L9MUv*B?a~o3TInOXtrSj@YEr1` z_5My8b1bhcKuy_|<$c|}qIZkgEqjH@ROIF`ml;Tim1PQ_(LatrPSunT-xgHdk-dks zQ``leDna1(pNuazWYU8d!S+?I9Mu`L24q>B$gJGTQu5IW2a^yjV zGB5P_j6x+r7T6(m1rwAF|DDo8WSGOY1=~}9;plgC$w(XiLRqc3(Well8k{vL#511c z%X+e6>^tkcBm%!tGW&&6i4;XRe`$itCu3leMUW9308dtL|}JFKOHthD04x z8jQ7}&exo@ck;pNP{M^5?wO83I2FC+1;nc?Wk|JAFPP{2r2q(W=hL>bp2;AEO;h`h1W00CU4rPvg=Ok^ z?)pSe`QuZE3DweWWDPue7xsKF2l{cWn*S6z*WjTQeKIQByQO7i4GD?47=3^73Na;@ zHAPz@zt1T|{uq?7O_(OtPtb|>-VTwZh{6VI z5oGg6(S=2r6$-m%hAYT3C6tbx;Lw0AuRhtwt+h{hPcyLHh#qG&kla!W-PVG&P*epn z?~dObB9DFp8f>!@{Uqv(d(G?!e7r%0Z;%rT>y&urfCU(a_}Vi{3P*|Fy6#9OuUZF- zWG`kB;4m(vhC1$$f8g_RWOo}|M*QT|Cl)K(>d#2foa1;lkMg*_BT(NVJ*j6!|7}eZ zQe3+giTiQ(ha+t6=@Jpr^jX(9tcQ8le%zNMYmEA3=C~l(ChW4AQd4`A%f{v0=i6Vy zC}-1Qm-@VU`5kEW0ux z99N=zUT1|sYFE*rQ~En2cYiAHVyL1w!caehW~*G!nS%%MC{3j75(Z6WLihlGFq0zB z=7BPqF91iNP6|zQFa9fbNxt@8c`<$f3(`IHf;XWj^V3Di!EeZ4e`VKaI>KigAcgih zF%+)nQYr|?{mc4==H8g2GE%nf-l zkm04U5wbsw2SAfY`QC>n|GMBa{;Lh3l(h6pV$f;wa5U9PPtE^tUfH{@(M zoFOo|aQ1m)vF?ZEW}~mWz1v#M z;vUb|_L9++z{bT@KM%4tellc!R3pFLf!;b~wy9M@76)@I+ph+pE6wrHhPY&sZfMagf%tefd39J0PC!fio~QP4%;WknP;kVv zGVgOLS#)D19YIV@Le=5!2}X_qXJu%VfZTAxWixa`6QMo1cF{w~0uvB7`5xDkviCd# z=M<7kL)C{Z_A*i&tpiCbeDeV z;9|-MfqPblFz@JLvxtG{^9qHcCYG0|XbQu3F^#IdBvEzI)lgBfHWhJ(G5k$Uk;YU# z!^h5i$R*52`H`Zsx9(03PPROg{3s$RH+(-H@ze(@BTme1(*JZ}vry!MA%`xAT482TU-(|n(6gzs&7M~*clregoPD-Tx52&DoUdUZ3JgmWgPA6C!cW{N(8 zl#m2;{!HN_k>&gS3o*uy64mTEZG>l~x`w5BC!uZ7FBGDKb)>3!jiD zK^osw#2_*_j8{0`7K-V7fpp(*lA2~C#=CtBW;2;tKBedMi3IIkcZMSE&^lKrVmP-< z%M%4^`6}FZ{|^B{{=R_Q@*STOK#}*8C2JEdYFcOafxc^QWm0wx!%XG@*le!(?2W8> zk)CapA|1?ok#5%=ioa5%gDHLS`h)LUwVC5I*eSuHDlKb7;jorIvZVCLANNZ!Q}TGe zq`ZFSq2kP0Y}b8Ikbc%2822ijK)&`6P(L|wWl_^AyAO=UGIv2^7x!Z0v}J9ES<3`q zp0zV*Kw}jm-%DT#6{J;EOX0gb&%sL~57OdPScdLlL(2oVBP4k=+NzVOvwnFq0wy7OB{loe<30e&llI zKUkMxrQWuo;I(U!6ogeC9E5dDCcz>ZyF?hf)Fok@3#?m1ZksdNis_LIGeHty@Bo0e 
z2@imy-~j+rrcgpq)NBFRPac7qXt@BnZRWoU!!Ud01Xw%*rp5ydsu9&0_YGVJ)rejf z55RLM-f9Nrh~GGrBRD($1WbX&6W~H0J^=|>UiKLH5yC%|Fw1gIt&Rm=C+-3bKw z-Yp^B9rhEt*QCt_sEmH95Tb0gEum}+c$D5#giZ8ls|+)0knUaAG*px^5QC?C3e(Wn`gl;p=+Ld8ep+{fny71Cg4C1bOF#juCpe5U=I5w! z+i+1+FS`%)CtK!{rJ@uE$!?{6me&f*^m?!cr8v5jBoukw6-^t8EbfXX1>wBLAiO+B zn3v}u=r!_`1hW&WE6?SIgW0NHUVSK-Jw?z2Q2fBH++9%nds~sOK^*n+1SVi(%L0dY zC^UYEMAQddfbzEV9i0p_qu@)5nAKtwTX__XP!rgEsn77##oJRB#+XemW4K7J5)y zr0>D)2+3>W=E4QMIIu`)8Mzu^PI8a~=h`ld62xKupRsexavN!q^j7Bd0{SGG%nKjp zMbfWJt8}SSX(c`TKVxJPK)}8aBJwd{@Ngh4oE~lw0m-*pl1yCYeUfkYeTmfFlng{% zfJE<4U9c2V^yL&REEY{p3Mmt6gT&M2fyCVu60<8Iao-ol-oG!5B}noOd00_w%+{u2 zbK24=uNNxeI6h#xl)`%9WD*Y3DSIV-NW4d<^e!tE3b9+1Og)e~C7MFxJyN7QK^lt` zdF=Jsq46#$az<^?m{R0?$50R4BSnfoJ*q@#ZUVFAR)_uV z99ALey}^<+SEBntVJKmHmzev_u!Y2E`n>eG6!648rI}q$eBF3GSgU=rHj+38MN~5W zZL;>JCw5Ws`*L|uC9{@)y%FNa1`KPp#iw2|^!H+GdVT+>`CeE4Yr~0(YX3UyvFbg) zS8l9>^wXBic)ifIjm~hxZ_(DCJBJ9rWvgKhonOED@LOQycdLaTm-_}TH!57F%xE2A zj;GJaIi9}?%<*opk;^BlF*E9tR&aBe7|B;|u?|FGz2xEN^kP=Si~DS5V*{5d8D{6z z`o9`3-xn6c~I&D?5UYi z1G%Xg%AOiEza9Z0x0rhDufirwo8Y;%nf>Xa%UF7Sz0i>xYgrWqT~ScS#||Kg+tlHh zTx*Y5$vUG3x>HyLDs&eg7|KO_tcOlgBB~Z2aBPpt>icLvU_Na^k;{Hhi+5SnhwRlyreoJJ0qL6ktb3v=aurx|w;Xam?7>u!A zJu7-9k7-fy6PuF9G%DoZqci$*lm6-}^B78KdD%6d6YEE*v4cYRZ&vz;B?CZV^}k3W zLm10}ZmWJ?G6e41N@_z8r=S8dq1|k4mI$A=G|B6Qo|m>y$#+C_JN6gFlD89hAlM~G=<@N zq)FTtHt&%k1tbAoWXNOhZb>df*ye`#r;D7Usmzg6y8rVGDIHWG!6z;w?%`)Wa6V-majzA*d#eTmZq39|{(R7GrUrYaY8NSRG3?c5cs5Zh2pREP?% zp}31NNz$!H<|z|uV0t%&>2FQhqBj%|#aCwO;za+1B~3S1v25gq8mWBj$4-o%RJ?1N z{)jWD#LBtLSIJVS1)jS&5Tf7L9#c3ZNIuK6!&JC0UR7Yo43!||zS!F2{+5dQME&i| zeK?9@Og9a)Kbex4PM9Pptwbh4;eC>zP{F+2w_0oR@_2eVzWvqfPmmY?7~|!=$*>Sq zD+!74B5umCAnM$4GdaAWD*R+{3_{^eFp?W3gpkJ~5aD-5!yz!LN;l!o}UCd6;?GQ{V7LVSvzxBGT4vNHcje0wGRmFfH-=$pgx zlpAcF-1O&R^OG=2qWRi=A4BM73KrT^f6P}G>i?7u1*dxTOkF4+$)=`R3b zn|b~7mUw!-fJrLWu_%mRTNYS`0>o2;p-|&pVjas2TM{5NeLkS+%hEK;@oQ@RkAX6= zL^AKbg6}ujJ-*Md>+T2g-OYgS4g=o>#?WuJ_@(6?rhZMYbbJXM0%FOAfWWqgz?Q!H 
zEW+uLXT3QLX^_A&1fZ`Pj6*;@D_VWbfpfRhVum3=%@wE&0TFpI1<>b~A)vn2BmFgX zxgG*7x_?_>Z$qG*`o3YNU_Uls6_0)zOoo6ctTF_MVnTF#rFVt^E$I*-YEK9h4pO zp`42B86Bf)d*F#g^YyV<*Y7B!Lwn=sAWTNb1X>azF)u@b#^u*QL|Z9zN{8D+VN0`p z7RmA0DNKff!Z0@wSgNMAD(Hh*V?oHed~93!HE50Y6Pq#^8kJvzJ+iGncTZ}+>g##z z&D;4k5HSYQB zi^R2t{a$>H8*63uc}tnSUg(6s)$;ILq{m~+;qMI=enXiRYZB^PObwUmg@pRPFYJD6 zTJ3f=9~jQ{vFp8u-g{u7w`iruJ_R-O9);On5EXix6{b4c*P1XP_a0ex1`XuiBf*Ms ziRHAUX$@i9v3yH`eHLl;@c}E&9EKHV0t?rnz$$G0fB4VAyTppK88&40Ex3m5Z%wP} zzInmaUf&P)JMVHfY!^9_n~i3a@6wnrFA5c!LgPK!tAJ#E`wml?v$8)fF0lR>u_sH6 z*cUO&1ghsZm1`xvb`VLxHWWEbR5%RbRg6hKVm*?=6KbGvH-*C8lypQ~C4&i)WW*fB ze*Ig2tC=ak?>VC`^ko5lzC6(Pt@r0ydJBE0rs0DU#gxAn&=7@rUstWeRe~f66QogC zY;9HBpD%)x@|Wu1bFZTge+>nJRZ#JpF<7)sR#&V?Y|fZ_9QZrkI5ma8-4y$g4X{h57aZ>fipap+cc@ zKB@I&jDh+hK`GIx_Z>RT4bcr%Z#aoHQ3=%bFqt?S49SM|Sf-L|PpE|1!~7OcQ!$~Qil&5k@qxrANQiHa z;>7)1fA@$#>4F=?r)+4%Hx!9aln`G`3GwfnN`xm!h)<9bUuo8>J4ugCuFlOg&3+ed`TcSRiJ|EEZWoi18eCkeMPm{v<@!A_~MSslbzD=IJ?LF;v z7~hx6Lvj4^2`P?uh8#y=y*l&nrOPFim-~)g`uSzP@|0#V#nVD(5Ym~KEnW0_p}Bpm zH}LM@Fl6oytKaqyW=*&)TH|BysQb9s8n{hebare)h1+{{QGX!p;cyuCaNJ-cw;%fq z%20a}`!1oqV`!*7U0!CNX!^OJmI2TwrG>I>>L0zY*bv52jugh;2mM}ou&!;tY{{6{ z3(eRkEM`$y3;yr(#V34(C7+5xZChZ%Sha=4EVHeFvD2N)WD}?`miD>B)E@|oSsVtf z@&;Q+V``N%>`lpUr6HtkdKCr{(qYi!1tvtcE_+L6LN7%~G$pN|2x(3rrS=CRk=0k6%W|`}CUV!55_?Ax z^3-u4JT`f;>k8qv!k|J5jG49CYFPNrE;*+Zkno+BB-NNeOf|Md$rsVn@VUV7U13b4 z6~;80FyXt}Vj4ZchQQrT;d?hFaElY?9*EWC6tw?g$g;kP$|hp@v-9ei->5C+yLq@tdeyYbR2;RWz{pR zlAUc0l%4Jza_epiWoch>YjdJMD)rTD<@>$Nt*N^`_Exx1c0ygXNPMAczC6qZr#q&m zP?lW@W&6HRmiHNDE-E3S-SqVWhn1 ze^A@K%X$x?{A6p0(%lrwzcuY%$}OEs2)Cz|EL&d+c01p?T#biEDUVF@+pRp(d)sC+ zLlB>C8G`tI)iBvJL8)i4I&$AuvS)%M6}b-z@rS1=l3xTGCAo-YIKnU_R~X~E!FaHi zSd&ylZ9#CHZ4D%!?i^B)x2848=Z(u9?UJXkb0N8TC?wYhVbB+8?@>x86YM&bEXF0b zRx4P)?J!nOXiX|7)}@Sv`?fL`2$HdIAC$2$fiRPN-4Y}^z&<8<1{ATU$U|8v$ zSSapRI2KIq?9ZVl*SRlI{%s%~3wz{F=}^YP9=UV2#IbM@hHL`+Dni|ZIN2kVjCH4Rf@w!RC-Y!L`g{W+v?cN8%XS$Wju#2GNKmNMpy z`qTGh1*FlKp3BVhzG(UH@^HhtMJSu&1Fp-7uNMJHqw$-v6c?7GZBMO(gtZD|Cay5- 
zuoP>;+R4`R-wJEHDXi_Lu(q3$XwWYymx%M0Xy`LC>wbf+5{*;WEkoJ~_EsGvXVpv% zxF3s|)xQy^nasWYk=xRxl*)J5N<^kD$u#bR5|JkmGmR|?^i?#z(-9J=!%!+$SbyW_ zLw1v3;jD1GRm*UjawnR?ZOWb2Ap^3!3gfoDvDV&`Oo#$`>Sfg7_9%Czc8_?hW>EU} z-dI7QHOV%lOXK!^Tj4fA!tMK@aC-ug+ZTaI<2Hr3$Nt_`!nUC*DkRq+D&juplLqTe zXWApng_Efvt?s6fy_*uq#RrmIp1KcH{i^S#m9J1;WIU;zOsGo~?=|<|7b8RBF(`ji zx@=w5XvjRUZ~Vu7tnm@Lig~O?5zyh>R0v@QCo0jns5nYk6V>|cpD@;79c4pVg<*Sw zz$6%}Eo@JiZ4I2A?wp#!*=`DFXRM=}#MKa9>C(E@DHCfymiC>f-3Znz83Cb@6(kKVa3X+TLY&aHq_3A7k*|VZDU# zgTcsH?Xpih__)|w7(3s&FcwW;)Cojcf9`^P-FR+@={IX*x_s=_YTqVfH%$hNosBGU z7EM1FBY#SQ#UD98M^K#K35e(CSbvz`jSo{K-clCN)WlG22!{;?T~Sex6$K>>mJa!W z{CC?wysjkek-HqkIMp z+~$2e97>D}wf9JXqX$LpEeY^!`cyw#sI4$kTVbU39tqI9B(+=b`?NSaWD|`FwfCre z>I4sia$iYpYFF~L`J1s@Hc>_rjwd5SIPRt}c8_=`Aj>B9Xm@4`De(@_@741aiMO=7 zGc~Cv9`d9p)QS~_Qn8>Un=rTPr6e70;VuNGmWQSgnC3Zwv&VU^hfX$Op5lzB8)}Tq zPhH$eHZf^b*G*gXxS=n_u%|wI=ZB+k)Q#BUKA9Obgk}2B=ffo9*rVF*0EN+;s@-`@ zwYy$ePx4OOi>N3DWy)Epqd~J1V?t@#I`x;DLutx*X!=Z4C@rpXh9^F>f)Prar;NxH zxlYIWjR~b!65d0n5h1mh>0gPET2MXJPUOBX&i$>XT0%%_EgfP9qk-DgzusY`cwss_ zJRXNgT=D#(omf%sELzrcfBP{>iNYAs2@C%@K{Qy`k-baC^vTwcZ+BA$KsRLoh!Y$D z_T2qth>QSzJ&fv;f;DJ#&%GHcV_$4i~zM#BPl~bVGQhqQ8Y1I4_(5@Ah>TU z!JZ%~h7csvOM+g9Q$lJ4Toe`^0it+4_vbn?0tEKbpC3q(kFX=wHf6?XFpL205@qSx z)-VF5JBPHqn=%5#3621JJu(8+SCpl1UFILREH-%Qb3HQxCOPbF3jwE`)YNiVSdF&~ z0fp5g=4XH~83JO z^-*>i2Br@}SD#VC?3FjlMtCtV8I71a_Sf7)dBRSw3kO$>KR#>-mv0w(C2Aj2_HF~# zE36HcoTdvk2E$iw7SQyPKiYlGmruO!Gn>As3+3nkY&=f=rhN66(~kW$!B!c~3E%y2 zSnn14z5y%Wek`zA6tX)J>UrNkd+z&XATZoOIqIPT<8QUK&F~FX^Y~yp{kqx~hAXTn z@!nuzc%Ss(u*%vuy6la=3vABX*%Xeq*cp!RP&PN5XlZ^QEF2fX(KtSeo5#-hLT)uv zKQtk?!YbYRX)qx?ZJl20-=8r+6hwRj$rzv|9RudpW|ZI1N{RIn8TWk$u09q+g83Q}CQh)CLzb4MBue}9fg58btlX!To-E>-*F(6{; zsh1OG3@EIAnKK3mlQAHQs*HjArt&)=riw`a8c4^0xwSb4Hbm9qgUFz-z0V}S0}89( z)ro~P83GhJv$mf<0}A8M0Ac5@1pU4AIp_A~#<`Ci597rPez4MDNg(Q(lKarw`0uFf zenW#jK2Sn@>C@0dc|-MV)=|BRz)+aXx~ZPaE~p{qLxY`Af`sQ&KV#*$yV{D? 
zgc|w&@=^p0-A56&mtlqN)HYuv-W!VHo+t_T0+Mil-&DxILzmqH3h}pe+2g~8F8g+& z=CDuwZPn16;^X0|KTDa&Zpv)e@DkY#hU`YW?4smiYhZe-wTbNAlFAkjs1#s12T zx6?Jr2{tmFt7R6{wItcAWtPixNs>)TS$1fy272uZdD8a#rov$nW|^`Wh%?s-l?SX) zeNm#MvVMGclc;jzu(`7L|A)j1<2049BeubY#0CS2wM$M@C)hyZZVHLJDI^vrNMdz@ z)6_k%OjF-rtNa8@a2J#=!Cfv7ES~HR)fVXrOK@o`oV6Q!39g|a5bXga@D9EgXD`x` za#p0gr_PMRS%vl1GJ}P)tp+&^oYgLSZ`opN;OungEQk^n&eA@Xc^T*=ftRL$3S&i7 zd}>iSDaHi5PAXdqax2Zo%qgw2uF@nx?@&VxWC41I7;2`Zu0l(vuJ(oO6N&-)q9iHV zMYMbBX?w`7Fa~Im6Hmw1hRm8&$|qYxrJUxaQr`C^Ja<#5PE(yErczFjQGEw0R2MDJ zs6N3qsBTt->I!2%O_=1<6KI$^-oG!22SLJX12OR!T&N(vp?Mx3L|J_5igtLdFxCc5 z^X${DlXw^mL0r3JZLkH-Xh7W*W_MGVElx1;P$yc__t(?Al)`P1A)op;E@6CvUAWyu z%S=(N6d4l671sMG493I17t}y*+Nb7e=7rs2iidxjt*jbc{%32-AGSo#>xG&y;wado zqEHAFRI-+WeK2FJi)fFi1J0-=X+hKHrNasJMSTF@#Rq(a)n5&&fHzZjvg{ebR+EL)xgpHnqc@;Oz5ofovQ+ok+1&TonQSm!B*tz&pdrwP!}FQ zc1-`iTpx%$iF?a*@#gv17ZVKuWffkHzi@kxSh>sAzxoc)o%>yf-lJTJ zV@sCH1G}d?26nS6e01Y)+6tBJ#`=-{Whjn|a9Jc%qQMA?gYEW8W4nf4t8}rs$Kgf&AT+RHvH~>CFqQ9oHxCUD=s}+IxH~x+T|{#JNQs z=9cIM^%m-%u%JrJB-CR;mBX;0N??-gOr9@wLwlI|_awEkCoWHu+Zd`G&8(ED?H$m^ z!g z5%vE2<@oe`K3)In|Cv!EeRI&*tLxE_L1VB!03d@#q(Gb_u%K>(rcRz+t`9TYK|Ops zkkk6Y8o|tL`^JQSagu|+{t1&O)Mogn84dpmWB4ab!oPW5 z%*`MW6bHc$Q~5Siz zxcik{WT;SKHCGw*)m09aJ+K6QDv6dUjRNc8K{6%NP9;-P7>5dBGE^qekSWQ-a9r3E z^T)wZ92Z3WVO$)y1lH>XWJ;BfygND!yGjL?9|!CzRapNx%%JHde_Xs+eh7g2Ow<>1 zpCuoE#&Z7a#;d?Y_-1XK2tVS8@yXcRVRd15r!=#bC8a^r&&A4cT63@b`FlYvtW_sb ztQQmuHv*C~##c_POZCSs1@(HNDf(2ujKqF4nW)>x_ZE>WtVVFPWc zpaK=z@{xqL_T0ULC$v>qjo`l~*t&MutMa}ruuB3n)FiGs_V*1dHAa4H!0LYyddtqr z0)^eJ(Eelh9s7s%mpUQ4fy9Z!p%0w+t=0}t%eXGiZ^^1h5f}Tw{!rdfn7$h7)bHVg z8Dp|C+Zuv0o#0dz_a#iX${N0lDI_5VLJDHf+^bANcFK#`;vB9Us-{n&d~2woH)U0^ zg(DYvU#KiXXv@f(&1nN>-1LPgtRMOBg+h^7kUKS78jkA~xd4&jeqCfwR_SN44;{ zRnYL5!YG;&e)kBY_IjR$$LcH74GM=MjNba(IMR9Yd-;AzLhodEpz$8LbG~ob+r<;% z7W3O5n+T6ZtwqKVBJY7E9uH4cOx}__k5udqPo1nK`YH^{kH8Xri|}Fc9?{dg#J=Jg zHYCneJ%I|7_o$w`oJiuVuLp&{8*H68bEn!2dyB}>ecVh}!{0-$NSpNM*`7N>YC?ef+?;H;06^2^3z>X(;-N&p+=G^jR 
z0yVv0NpA*CV<0IAl}<6mtv~kEPiN-NCUiC|DF~&yM3;&O$(|?J7U9(l2`?}sJYg)~ z%`Odvko)(A-Rc9`O;`dY3V5Rz#qKRx^C)WCa~F-mZiQjZmcYVp?BY}yQWNbGYqn-v zLqMPI9QfT$3Fx#hl@N6TYqtFLV9nMIHU{(~4@Q|$*Zn_O$Rb7#&)nPQFzjs;n6O(^ z!xlO+p%m0Jc2fvSDt|jVOfdg5sz$&?^f4LdC}Q0+w{|*=@m=)6(+OKU#hNT;nNdS& zq$SB@;@y;R{?@cQ7e%{dDa77JAI+W&W)Ca@U6eK<{Dj&dJdG&f6-L4nCWN0rLpZlb zA-jnZc8jSVPGR?-QN`|ybffGR;p~|kJdX-vd?zgNonj1OlD$lzf!w@LvKMjniAz-G zCz{MB;)`1x0QM$i0PKNf0Ena}17JdJ3;-I@0iZAr0K#MdOrT)^*rRX&5G4aZOvwQF zGip5m9=FuU>xKR}XaCT@J30)zG6a?Z0I5e|o#1$vka}j=lHj1}^Wy(PcXI+YM0KJU zqJ8z(gYSJ$ura}T?8(iy1$A4L-U{W-r?L2grk~zd4d=fX)I#Lh)+lX4p%fH}Y@lAb z`g`R=v35(Kd=#0kk5Ue44TY5@hC21vl|HDkvh^SD5u23f9tX-&odoKOnkQxX)LdGw zuLc#C8)}u1<72XuR~sLbT`o^g`-4rP>;3(hVJksgGrq&pFSS&I<6i^WnsM`LOSgI4 zk{_=ZIu3X05K2?8J3M!QeQ&VDVdyqujgq@}Nm94C_-XNCnP|?&hBz#eDoJh6&Bd?o zD|s#2-l21(1mX#HEu>(j$b5PH4pFaV4%tme?9)Rt00?8=As{In?l6@R!2TqF>x-(e zmk6$~EeZZ{OS`;Y=rPbMwKE1pyL|5dUdDiFm%SoAah)(G<1=VU#^pXHzpUa*;jKrGW4) zT76qU3EMmjT_?rvx<(2MM$87bxETmL+ysQrD=*#*xiytXd|r9EZ`jXN9waxR zrH*Zh%K}>1Uew;wG#^Ecdmh$TT^FE*+zLa9L|_?$*v+Oe%n-Fpl)z_O1I4Ln0u_qy z(KP+JQ36+3mpcBMx?IQQt`djnlTLS?BGwZp9-AoGOA3>jD6EEFGLt=$rhr~MvL=+@ zHEI-t}v7Y1ttMqZB;h* zap`j4`*i1!iFZ@@PWzO6{DCTrrXr%gV(;b+w&MG-cY=oRlfZY)#3A9k6>Qyf7&asb zO!Ab;qoGRDIcWG!l%y_Vs{W~o_xC}G_Y;VT%a*_y4dnm)Z?XN*XA*|(3S%urM8i|( zILWyUhUng!WSHGenPXDSyfv-V9qW;kOnb2nD7f@Byr*84m< z*3|{Z0WgD>Qq7M0ewnLeyI3-|3vBzFwWlp*^m+koumAhId{v8S|4SFt!gf(mPhBSd z;IIz(-X(OT8MXv`G==Z2mwr(v5b)gzEXdsj3*V_n9$vZ#@oj-!*RNEF4&T)Z*5fJ+ zUuiaLU6uHI0S$bo7rl)0U=syjfxS01bjk*6(*^m+kX+emGZ3ZGsvRTXOqo&j!8h2A@+)bhJ9ywHB1-AtG8$;f5gRR20Y;BuizO_xQ^vZ)D)AOx6 z>+=SG8cf1Abw}9(CyucQV<2vU6O?R$n^7pkoVKLT>xB;6UH47c7Ip8X3%d#1CVh6v zK4IHn;I?*&Jbkt`@V%AJ@SWnPIl-{4P9RVB*TXbLRLz&BM8ft2yYRiUw}frAf~l&* zFjW;8w<*t{A#AHcyZ$F8Ztqb?1*Cb3*xJs@w*=D@<-^0vbN%Rr@CJLS!%{+c5hG81 zU_}UTFc4n5gm60B8VFC}6sQFFJ;JF!cm38SFIQh7p59>Vyu8148p4nAX@86>gr}N# zc7;se9GJ#|gJLw55N`}A+9k?ZG&8WExT zqzk^Sv1Lw9dGXu}Oha`;)lXwm8A0*e$}?(Uy4_eWFR`5{e&uinC^C*jN)VU!UGV!u4c$YD#PtO;IS@H~!-{lCm@}l)VoMWeNHNWiQf^ 
zQdV@q{(N^BE2xe?iOqsKbv-Ich`|t>tw*NZ6KaUf-4qUYQ#dT<$zg)HZ$e)U8Egt) zh0Ek16Y7$KJa&;T`LL?60`N z#?<6+?$v-90HY9EgDCVDf%Dw@PJ+6@THZ^b-=l+im-zh94qF)kqN&zv|JOj_QFWN$ zZ~E-k_KK%1A@q8oYmEFZqQffZet#5LT*tyjh4m27F8N)=8MefKG<{win^0fWR}2An z0<+Lvu)n4*S3}^1Uyi$3yJayIYrij6mLz7rV@YD>rfp&VZ*xet4(%}Y%S??Qn!RRw z{~9U;sYm?1@uC(Ut;;JB)93kt-5^Ocs)3%<<{j$?$f#6&bP_f8@FFkVi6-x{oSG;i;;!fPo1FuG!(1n zbBD{sQ+&uasIH#d;|?EIp7>5}G+UdiR2{yX+B=*aReawPL!U(Dd^zJ3afe~@DX{Rp zSQNgCHdzDuKL$Fybn^Fcfen18Ksxt01oJ%tsXn*xUEg9oQi3^kM&tX0y6!%ze??+B zjTqkzRg4VZi4wj~pn>na&-gBnD|{zP`7T|)Z1a6fAbk=A@U`o@iSG)7_%E>Vy<&3s zzDFSSE|I^_uz~LsNP$Xx7bh6s^|`UG#eGHoeuJ%&m)90665v+?DFeJnldpYLOd`C& zx=38Z43a*=`R-6g)Wzffyy*WJ^WK6<^-`3#TUa#&^IN!CsHY_&Cf z-=mLumldKC-&+j*#HJ+cjSAoQ=%eO@oQdnc*62xmH_5Wqi12;XM_({ubt_o6=rAnY z6&NcvW=jsw-C-+Py}SfDmET};D*p`2NUW6RqB2>D=85{=;kBj6gm{Hvt-8QcM6F-t zP+s&&rZn0m*1*iRh7jM1Xei%Jp}aW35U);P4UE4Ytbw_~#t<(>_8H|%zUIrr^mNh? zhv_M0MPah2nA}e%UUMmzfFzdRS4|VkhleQ=Ulb%IF}1spxc-?TvBGekmcT+{)EO0q zB8zqj0d%%Cka)UtY6^+FDI}(S?jF_WZfzm+Q1g`%=nXcK__1ZlgzZV~yO6lot0XU< zE)OK`rlcQJ)3D#OVoqu-ecwNNU;T0<^wM~?PSw9A6%~P5=pDQ$)?P#;WvwW453PQL zwF+ZtP$a|C3-(BeHL0GoBALvho03^{QyAJ!VW{}UFl*1)HGweHU_Jj%WV+ngRv7m43rym%+3LMap*LX?koVXMz4yQplnpja1&Z<)AxbGPn&6>> zM<}l_77PV;>J%~AhQSc1wM!NZTj$I$=%x&VZptta`y2-5M3?6>4Aj?(2pI;H1M}IR z3UC<)6U=wUsg?d!$S|0!3@Hfv7s?YPyWqqLVZ4}v_+yWA?DOoxcXMk~{Mph!pG4q$ z<*619V{%Pv$xE<#LacEY+zgujU5W5C-*f$ty`|2Oo$9B#fL(C<)JLY)_6{2svWp`5 z+Nn^;KB}KBb|o!QD_CCcFf1<@m{d=uE{E!gcp(LHqNw(Z3%2c9V8aJg*1`RGOX|E{ zsOrh<5ofqK3~U!z*bX&FVIADPOH3ta*ph{#>2nUpuT4f@mZl|7WftyEfbY9te@$JE zS@=QtezSIygHRryFAv)}U%B(&Vc@&K$oCnv@ZHRl?}8%V1!R0TTU)DJp10J`>jm%~ zyD1dLQ!NChe5a-;hxE_Jmhg_IPg%fUOb)ritcT`l6SJui^pFjik#k^3TD51Vcq4L2)d3)QT zeoF{_776dQS8TmIIt+Qdz(Rcpp#o!KGlK@^i?c0tWH1o*+<9Tf$Ap^9X9(~{ev?Sw zM1NK6tG}>@CHRZH_ta(9Zwu@)0{Xaw?`q|>Pm28Eus%Tjr^5Oz|Gl7wAs`Ra1?XR9 zUY5Vei!pgrVMDpH5}qx2^x5>kUTU0CpfHXC(>;5o*ax%50*-fy+3XA(5+2H|v^VCP}VTAKEZ=4tHQujp3spDhwAZ7S#TO%d_oOT-EPE}S>%z8;k2bu-C)}c-%>=MMe;lJMu9L~ 
zVPv>ToxR*XQGJgh>RnqT5S;sP7u`VVf&eQhs!;;sw~lsLN_eO1!h3l>}dn@I8gJlmcmfVOnsn}STy`wT|PK&K6TvC!nQwD*E#~cLe1a{-;>*;la z`3W#FwI{6k4OmICWMT4AoEFeW>MNu8dOta#BUv4-&CgRCesn5-z9VTpI^;>^pI z0(-r%ruVV;fW12!suxkcR}@xed~jGtcC#r3z352;$8#syQ8BI z#p*4c^+jaChmOXfwZd?Wm%tLVv5H1v;I(zxf$-zv*+6VcuXB$Bv1y;{3iP>qnb%i; zJspkznqVtx_a3cKd(>9@U-8|H93J|Nw{Tm;&xbw@HsmIZiHq9mpV@!**!pE4p*y{5 z>F2jv+B~-LWlL?nUg)f)HO3486J>b@ufQ?@u&_p8R9k1z#MzKtbkvGhpBB(EXN9RR zW{S$HK6MvZLw|cY4GaB6jC_49|HFTUF~m=>rSv%8IW>jfG$#CRQ{nRc`@&^{gv$bw z&^@?Np?gaqeG#3oe+ffeL!qWqS!K<8sawPQJ_NHJ@Tl6n2^vE z#J$i~G(d^kqc(bYz(+77iP!|$vC(F0JO9{H zL|;Ve#OFg3#tp`A?FuYI0LP0e3`3xG&CtD@LU*d4XbRo;2%P$i{JO5c3LKkzgWaS1 zr0#9do#NjCp9=}yMM~_C7liG5WKaRg7XWsc3fb8o&uTQ+AN!oNB-@iM7ECU}kp_2) ze>pa#euTqpg)z8`lz8g;OEMILf!W$6gZl&eZ^5hZ4r^r2>*v zg_eZx6Nrh*mT>tZ%A6eV?=Y-e5m@*R;ZkAXyL!f23Wdwr{7?jJRQSF}xYXz72^S8- z60I9-&3Ar8dWKyS6`aK7Rv=tD48oYzC<*4GZ=N4; z7?;?39g0!w2>-gJWnM4zq0l{xe|K~k&RP^$7+?R44`z*0y?2Q`Wri&g9!;N@9v9We zd_`)mPB6l|ugFtwur=d5pZzvDyR}}$t(5CO7Ar#rf9`7KPlGYQYnKf00%L#|5XXRk ztnf0!5aM6AWX|h_COkh@bCIQ~M>;u5U_X#%e zy_*u?yXlMi$^dUpNPt&gQE9w+vF3Xpl@q>C65n+_ZgDSsr%re{_X#B7yTWP;6u#fr zFGF{;B?11vtzT;Ep;!N94A7Q_c=7CEb4lXsmPC5J(8HiNy=NFu2Rxj6F-V4ish?fY zOxWKel6sdYLC&yY7*HhzD#KupO6pG3a`S|JeLW~a-eBut(3?pz3`U`}zi63ZKt=L! 
z?qhv23>1bEq`)MzpHV~Dr+vxn&3uK$P~Kc+WQhUMw|CgE=pFQ^_Uhh#t1!P}yFL^g*^B#3noe)0T>k&SSuzBv?RSDLkJc>0f^VM;(*})*O zTKi(G-Xo8yElFAK+sY6iNamyj=@1ZG+e6?YaA`mnP4L{BV1|IgI87xiX*(7Jh&4&u zt;??BC&*8*VF+|n()Mo35D;IPwwn_&1k_ii?QgI#UlIN2=GbOgW->BF@@`5bXID~g zAt*b>%k4z>`?f-Df`r-xv7SC#VzFgQ1$`AQ@S#7G5EdJ(t0)QN3WI)RFcw?X7B)-F zwgwhc1qCWB-lKy016BT#tXqA>c8MG89*ZXx&{fdc$iQN%pwgAFSVT!4d%nX|NW4b` z6_Aj4Qv`k85<#yQAmwBAio>vaMPLcV6|s|zi2g_tiwOF1`B#m<{&S%aOu&>6Cm|&#;AgwlxIpRy>n>Py{t6n0lxa zIB&~e54JGhV5^}0z|*5=*j4oJv#P^)YKF%L9v|(n8Wu@XXidVqXqg>*lA{O;r&kyV zYtIeD-4phNsie>IxOF|{yd{fXFLccBqd(pq9fo>}!18TKBrXQS81OFZZGBukTgJeA z=TcEY(-(UrZiX`_B;>oVHD3Rkx*W&A;iY5xw*}^z%dNArf^GZ?t4hXQ=A!ukbmA1MXwjS;L^H%=q?*E&#nHvH(2P7buVI#vrX@kbZ-Up)8fTI z_eO>8dqh!ng0oHcm2@|qvM1xA`zVT5toXiM9Sbg<^=bD>g(wxZ;ALK>b-Wn$q*QI(k6GtfLg)20Ea;*6_`YP5iMIK|Hm9-jxYVm z5MX~NCOxw$M*J;3^en32L#HH(_zJ5~o`^3nd;s=?!bo@RvS3MYTKr7Jryh!?(0z{{ zDxheO70SPEyq=;~=uX`)Q5f5uXV^8#@6|NnyPCmgGaOct?59~1zMEpnN~Cltgr_Yb zygcLSxd``rah{Cdl1R@Y?>+MCbq>SYUV$aRV|%~B$awV(qS9<@2=LRLLypr;VZ6w> znAZApqZX+!e!cDnTP5R z&K_M=K*@3HzZ1Ssw#fG_5%nzc-=RZTBD}(IEP%km_wq9levgQ%UG@|#5xy1EM0kp* zXbRu=h^XoW&(L*Wv6lS?Tk-wSaWZ@#Mbs{8Ccuk4dN^S%yTdB1gztM)RJFykc6Znc z;n^V%V-%2`wQIJPC#)*YMZGc+_53Dl#jSZ?*x|JYSB8MX81f0r5Wt}VVoj#C+9gB& z1RFwrH+^3GpHLYB;wwYGIUzZ{z|K8~Op@~kTMdD8XOtNN6YQ;$i4VrOnMwc>^R+h| zM=CH$a%d}u%&nJxB&jD#avXvr#}SYu^%IIAe@kvXQz$%~yI35)D-4x;fral_G$1gp z0-He-MHAlDiyBS|`IK94O}o!jHxIE$%-ngE!JuB_T;7|tyMtjAV{w>heh{d8<{!$z zpfC;w!elVC+8gHIzc0amk3?%AVg9B@d*0G$uNUe_#)o6QfiPe6%!iK7na&FAsh2Gz z+1aDfdY3FDIL(sDSh?0@4wYA_`I@Dn)t`5fG#ok=}bEC{>Dp z0t$i@L8XKA5(K136-0V((xnFylJ6usf@Q|*%>C{=cdmaJkN(b+oU_YXYwu_0M2fY2 zG3@#PuA=GMTJ}wu5k3P|9AAkomBfi@$CU~Fj&<<}Mz2BP*|*0UTId6=oQv!Sgk%XB zi&xy^uEBtqejWJ}Jb9NMop`fu@tg(fHN*SP{_Ww$D0d01#qhP*NJA9Ja3iu`s-0EZ z<+x?}O+J<6=QEJ^R0GBXC922Y(>2WvS%@}Z(~N-In%x{EydPI!a}r^(xK1yoSduE; z967&w$rRzcY+gB8*SlfwJMa!kpS{wO>8Ud}J=xe1W4~<&jjD0o4$tV;e`5(UTk$R( zEu&vI&!Z4IaagA|YSBdG&SvwC6&&~&0lX0wgD5X`z0aeYqLwu@esQJN80=?)3`F+7 
zTzQ_FwwBVWpj%rvtm@)%XfO=fx*4{mdLT+|cA_ChJHcO z^iaVpV#S8Gm$4SrVn6V7pDADVgm;ATXw~9^=3TZjgcM-t5#h%-&h@4%vh;^bcMc@l4W_b+JpU^D`;;)GD&FW%sMlC|% zP}I_5?9y!QGu=|5u{!r9cAQME6?U8y;|?0Ag^HM{Z07YX-mRpLj^|L1R~{g-D`2yH zuvu-9#lS?@Q8(k2)pfN(VpK!C3yu1OQki@jwZyiHm#tR;HA8%Gh3L#ap3c zDHE}u-4ulatC#gVzp?S+pTk$;*{F7QnHTgPs1PoQ@>S@lqg}O&*X6Ho^mGR1+p=nx z?J$NI_yGJp+W^sgq1`It=~j_8VNjcPro)e?yHGo2@eSNeyF3&{?glG+;oP@4X^qk$ z*XiYHqz4NHLE7C)sE`9|H>!FDd1`ax7Oq8}NSLlzW`BNW0JTv%>adtN;=n);cQ^~` z?^>JPJno)SnB_}Q9^*m0{5m%zt6WfLcALg)dT^P>Yf7?ldZAfaabqr2z_x@aG%XeW zL1~E-61(&oB&oDvb#UG9Ighi`gu?PvOq7yM+3ULgI%CAKM6Gm`UxTYj7iMM7-4`3YJL}qv0wOQQ3t3KFRvFre$Ws{0t*8v$ zm|%DfuE?5LuBA;XGma>4Upl|4$t&isGZfUg#YJ7flc(n3s;3^06D6WKXy!aR2YyNU z{>61z=Ubj)o|XmhOGJIU9Hp%Z&ron?Tb6~(V8uhHRm4XAnLBmH7pA!ABg>aWUTLS4 z=~b`1j#^bR2JvXMj9R!jQDjZ>)ozTKS|T#mja<Pw8eOUq=u|aYxZX z$J$@T=Zy(_b_;k;Fc^)r8*2j~9~U`6^c=)&;5k>Xc^O;_T)eSa^A}N_4src69mh zS=UWRv-9}GIW(ZiZY&w z1+3oM@Zpe4L&|hV;?y(yGu+UurOlQo^$n|sibRYtDPzuOW9O<gl-|qK(e@_4tKZ$eFNE%LFsS6_ehS6;rEX4VtJ4x@xnY)%F4R#`rz( z4~WxIY4;nw8Z~EY{5PZHq?$^h)~Hsx>P1i?5o*4xD)voB(zJ2oDi5!AK@dvW(|N+S zM~say>O_>WI=5DeJB?0Fov|m$V(xVAMD7h&l6b0gCFQPL6LGQc&9uE!m#)Uv;>Og*Fqvt~r`oop*c!ym>neqYqTrwcNE~Tbfnr&= z(2~Ja&7DVNSr&?AxkBfs9fnp-+MP=iatE)h%o`9QOs zdwHvuyS|zLAgUPYzS6#e4RJW{i%@a+sTZx{@a4Bx!d7z9nPi5zFN$St4X!3ln|5TS z3@L0VKnhsG%oqEiV2=%ZsxLq7ltfEGfM|ij*Pmv4!AOR*cwKRMw$6FpY3J%bUgo$( zOub>>?yLKBnUTN;SqD~tJwP{p?t@YWX6pnm{Nx95mV-Tr^nd1qglZe62g16hL~v(s zcl%1e`&YeHcUvgXND=G~7vAM5v@L}C*hXzPXN`OKNttwfzVUM1+!VTod~JNaw+U%;%VfR4xzKXaZ(aX7 zgWtN@^?1MaiDwy1Q-+!-Q26S+-MDa`X=iBWR?lkMv~!1NNBjx&how#(q|!vKqqug( zEhd0tEy+o*ciakkNYB%ebO6C6S&bt)|O%YA))pF{U^aQK@VO2oQ^obj~2 zP?NSRgu^95dThxXVuL=X$fpB4AOiFFcBjavT#*Z}=MxEGdJsV(<1TngBA0L{6oEvE z3*LQEpJG_^FMY}vxroUQ7Xhj7X2(YZocvDGleE_EfK0^;NMDu(pSAjf3u zQdeWHqX{UqU&7<)?!^7EXXBU^hhm7AICo+QppAcw)k*)hxRCXiqG9tm8tetWfZ$K~ zh;-8|XqlS|i}5Ft*aek8X0samnrU~k-NgC#&8EMpU;hNey|{z$v^0Sfo#%J|Jr&9C z-d@-kHhVi-4OB9-TSgi>#l|s1OaUp>_pN*acu$w7@!f-MQ9H6FuN|-lRLT!8^WUu& 
z{@Z!{hX(t%P?u8}B(r&rNbtIewYziQI`}$+@4D^vc;9u`>$-8hoOWW7jwVW5PC|B% zi1TJHs&89tb*`39S4_7?qDimAemuUZ!P>fyxIuU~`JqK|4-62+!g=u*t(e4!cPrf= zV|^=0>Y{?b3&jqr>{5Zh{JK*bU?lp#Ns<87swVxW>Ic#QNbvhTa)^9Tj{7H~;EV3bFPHu&B<4Jm zV3*0dAML5U!tzpcchCzWarl`RO5$+Q7d%Rq6haJ*!g=hTrALbAW(2;8gVd>d1pp45 zf5E|SUc|KC{*Q~oKLH0+t~+wE^t(;RKbDJG{6I&+{8vZepU_d52cFwKvcGxGa&e(| zkZ8WTO^k59qfLxxzEM?6vZXG!We(kc&LlIqvi<&fyaMzgSa3jay9d*fa|`J=0Sbq% z-{Xc~g~Luq=P}p_)rrjDhyQ-d`FA_6 zOGj+C25mNcnMbyHnJD+z4dye3 zJMOeQk{N$(JfOGh|LZm+5~7;jpjempawP`{vtoz z7uuY^M$4q>w|js9Ga3OY=h8A=U#VB9Il#1QO5(@#Iw~}}3iPD1b_Q;M$(R2`gXQN( zJwBRz88w|75qs6}g;tg7R^H8~xl2bLpC?#}e@zfS8$5TgBY6Aj1-I4Hk@ersojd?$ zI(}!Gfu?VXBmB`fgBd4*R=(x;Tg)d5##C5DzS&wg0!BIdzCYVyWKowS?ewjF2Zjp@ zzdQWVE0Kv@jA}-rAl-uSgaP{AfrJ5u-gt5MGcW4I-M7r{)IT{9Q$GDZ+S|HL%gqUl;Uc?xFc=enD`J6lxFn#jwkv;dfga4eX1G4>{r?B7 z5MJLMaxrY~+xY?j0IY#QoA1>yIdq{+J>O0d0rkk>@64tciqK2u0%JB`6lA9)O!_3s zerL+2)=4zb=-WYwCV-*7JE_DH5H#;X-taSFSy~SQ_su~dZ6?;G$4hp~(NDMpOi27Kjg3y5zaa)lA~*X?Q1Dv< z?tSxWeP1C!T0KJRU31*3oog|7jduCk%$F#z8jRo<*_?b%5Z3?2s2>_}mR2S#2If{K z*hc-n+N%EJFb%M-na;;0(cSSY&}R7KC0~tlV&>=mmd5^jLJfLva!3F^#PFi0&nl~T zfp8w#hCI?+op6@Ew?j!wa&jowYCSosvC)xnawbE4QGr1rUO`u3NDF;Csn+AVJq?2D zmj2_rh6g_ZTRe~HtVukN;GbZEECqkq7yFVwAdlTKh5-U#_w#=i*MA;rfJp^ZK@SS~ zxOvdYz0=A0`i9^7R2x0v{8C#SxR>)JD$+5Jy-v_(){U*fa0!5> zJeg4~iWS8-#95qomy;NAgYXd25`=pILye5Pdow=-=}}r+H_Hn(>N|e@;gTJRxihNz zA0W}6H#W=PQq{b;qrLqXLO}h06$L)x;6Fh@0<-R0h0Vx~1D5LreQ5=y;-#P$Ais5+ z>!E(@s~uOuHVe~Z&Q4D(+gV@3`1u+M(XD5s41yY1-m{5C{ml{+yEj=YRlW z>Mw-ArV>ySs?jnPe^KV33#qZG#-5U%sYZnwqTOFA0Ga_mP-;W|9p+?A6+7Vm<0ZR^ z=2sp0vlIxR02FGu4h3D$D;&_tAE4{qPS_TD;V14c`XV*HAg~E)JTNE26BRXkp{Y|V zYcK}*QO%f&hZTkBN=<2~9+DSfo!Lv=@Wr4SrfLQ0w!CW=)3ss(e?6!6$4U(*6{`bG zw0}9V_{U5B!A1Lz66W6#{qsyHxMdz{`+Q9K{QC3xg|-Ev`PPK9M|#H+&ThX@GeVx2 zHBoXaF4Lwm$Q*QA8(e(@JYTPKwVV}QsclIiqaJzKclt^%?EaDqrgH_`v@5V$-vkDj z#dm7_SYqHiE`J=RHG-q7y|?|dcKrIoCEuG={8dMO@eN9#&HKA8D_~3!3Dp&Vqfq)o zbAC?l*PZ>GAlD&;^CNBaMDx?B=s87;^^~ZTu9U)*jg-)o)|9N2#S=0>omMSqw+#X0 
zATXx*{RpsFXZsVKp`DJvuLAD}r1@{c0N7^?DER{nK&JJrzFYkU1Hu3d82G&i_=N#G zf(G-&)W2nW1DH6NvX0qaBzTG1*2KERFf9(-`vq|>-Z$1A9a2?IiKeOI0QoCg0EM0x_@xCtKfnTg@ZX05NcdkM0)*f%E%42x;qMM; z{av-dGIVENpzo*W1<=KDyeZWd{(-B{-BcNJ2K_9CwLtE zk5+DqG~cG(8+4Ap4Z$$}vPnU*=ztDroXxD!K54cvMLLLyFEF-%e92OtgiKPNoKK}j z;`2dRKqQko#KS(10i)zcxRvRa5EUh zY}6mp$5b3~j5G;zT{Rq^4t~49q0g80>A;AIf~Ih+KzhL_@bk%^4pL7@deExwdjDTb z(C+;+7E8>p2SH1FAlv5z-GRLYr@|a~@L7vFTSFyU_)^~_(-hRWqDP!3N=XrwW%YF^ zEL1R~nt;BdT9S0!7jDHk2&79NW4*o-Ax8qa%fuEK&0&z}9{?Q4Co_#X^;0%HlE;M2 z=lBUw8GrG_-`X%R8Ws|*BS*fZx)ZVwq$O+Ti%x`#JOE<)i*TqnlOq|b-8u7fl>b`t z(-ymaVmWKkYF!u9)~r)n{(Qi44s1w7>)zkpQ42;C6xVNBKd@eJYn+H!@rh0y+38nW97v?_5+BF^uGLNv$8u)v@`TP+sn zle?h=6B8YgOclY$3`Ru^+)k*}DD9xvpNaEdVDhtA3LpdYUqXf!DuXz;S=GfS@lQhA zO{dKn(YU=Jvb@+23?iNm92eT1uy(zdYS6=|Ih!URP%-(^fn%Rg0SyoJ5}YTk>MXygf z1B%T~j_|49cpnYoAF;bNs%|o78!>*Vh9Mb45TBTW>{)#gU7@5MyXq%_@tG+wN+X(y zt%5nFi&PRpzH_;37=?0YT=Ap&*~Ju?z~kFF^RpiMwFJ;~KWDM4cMzKc&B)mUB|YYa z>iMO?1J-l;hM~0Xz1^Wh0#rh%6D!U1Zp9vU6~rrVwc_1)wz)j-iOK2Wqdn?86b7>Y z6%0RX^S^Ci1HrKNSAqfh3mJYo0sqFqpzZ0LhuZS8TJN1#oBy(=Y?f?+G2Sx# z^3;f~y-OXG1IaNwX>ISifF}L{=W)(o0`g~d+TT{$0s?Z)SrDGZ@ZKbAK&Nv-zzLe; z7kt)g&dd<_o}2bKqOeKO{r$vp!{&u1Rcp_g^3}Xml`SsM6PWtY^J3;NmiQ+D17iCx zvBa-n_y+|;pf+l$xfzVmEAJ6>D$V&yKvEQ=TZlTcSx*YvzicsVhG?V0L^e$f$s6o< zmx!3g5irwk*wL($=nsNeG>Y{r9-OCS^+ zvK^0F0)C2_VK-0$os{ZKFiI7<;S-5UUp8)nvDFIlRRk<41X&w+kd4JqLzsd=Ej9D7 zy6vJ6215k(SRFN}7RFZ_%7@f%gjKk>u6#aqsOGoT?Cr2Irv4B}Y~U>BRB+IeVGw3D zLqX8uSme&zf4K(M7Xs1t`Q93l@2|O9huU!7Sj%ESbb%t5%nQ4AH6Kd4MUk6bEzlKU zyMn02PwUI3WH3mvWFQ8$IQCAfE|hQQ0_&(z*~T8asjX3|lWCiX;gc1h`rmE~eW!?k zQ^=yhf-wfu0pZ=N0HgSw7=sfVdyrXIOy}3Yz=1n=i8dl(K43(g!tWVTh-SwOpRI{r zW0ec6$tn2G8Zn;VTho#Sthx5RH5Dv>w5Ir!bta1LTMz$W5zVJ)_{|p8kxQO3CwE8C zr+Y(69EQZ|_AS|)o&4?GPr-z?TeuM>+P<^{+SX^F1!Gg~M!N$KSWcu>yTRJ9+b_Q@ zC(9UH0X>B;0XtY!R`ct-k%KCx4utC6tRevVl4lq+zwAq*Kgr1&39&O9)b65&*pIyX z{1R6XG=#t5aihXUJSz%>LV;ev%5B7f{}ZeM4orbqNA!lo%SE7Le5@HAOoC{m3?eYL z5YvsjTDzf&$tP>HUCK6r99x=HN?xkl%%=lIvF)T3#xRiXlNCHNz$xZ+pA+g+RQzzw 
z7paReX1w%R=!&@rm|Foz+1-Z6gn9UHX};U=ps-qosV*mk_QQ`k`(?)e_r(KhbHE1< zG$(Doiw{&*K+XAbv#R;ansW(SYJ=+6E;?O1wEHFX#MnmF+4zT{>20$Ut?@klqOV}c zF&Ze9mH(3aNDIcN@cb21?&g@;!(&rIgXq9@S;0gG$bx1lYJ6qlnXv~DLTrPdLN*4C zwAMdP52%y;SKj*Z8UVm|d-fxI{d7v1A#A~xb*)btz60iR;X5$5^HFF5*hz|Apn|ah z%Qu$=nBqSa+i!%Cx&g--6*O8dIEYv~2ni*i!$G>^qY<~>Gyb+{Q~0mpvP zk2;Jh#ox$k_R%s#?0#|YJqhONUwO7!k5{gPM4+SSBQ5~wzbhQn|J87C`NXEb!r?y` z4xP?a3qVt{@q&CLRES1 zZeYOR#P`tQGgWk`q6x56S4166wdO(bQ=f1F!x$go7*HLNOWWjQF_=IYZANP`K_IQj zew2cUUcRrf|A%Y7LKA?S{=RAZAEKmLSl6Eg2e;jl0V5uM(+y&|7XaM=sjyig2grkf z4?qm!F^Kz!4nWKs=2Klnh{>rfK;c-U>A7qZ(AG0)l3O4ck}zF$qWqqYob{94Y%y824G+~R zqBTV9excH;ZS`??y&;?{8?;D-E}HB|BB?=CRsk6Gi8PGWjnNgU`xo#5Bd9(KNeqm~ zfY4HXF*zgv9r#gvxTQWnJNT9FJ-ArUvquWz*+^FYsw9jz%F zCvZ`NRoliIMV7-v_=N-r{gDDM5Ldr>--lA0tkid5_gClFw;NF-0$xKtG1y=#D^eIB zW_aol4|PNsh&)#E#9k_mO{RLq*ERz;6dVI7AQ+Nco}GW|yQn%WV!QIF(s^_eFhfnV zGZsXqWU~%&cq#7`=o(n&K2(*(2b=BeQeb|L*xO#!zn7BVf_CMMQz0T;Et*9Ol>MeAhk9k0aMwGw}td zC)Qs&j!eGYh;y@aGv9i%nm6q!R|PG!fzVM}(ZZ^$EcZb^`hTNW0Kwfq`^?dZ)w9$S zRbpe02mG4_2!)7Bpjd^Ro$Ole-E$?g@oJVw&GMJA~_szq*d}36Gok!R( z36BK&8hw1wl&+S#eM5+Jr4JoNn?MHIt^j#EHi_{c!c8Q91Wpc9>__Kq(I$X;=(N`j zw?gtkr4U@rN>M}Yzo8vE-BA{_6;UiO#WJVZ-?IE;`XX3yQPH~0;7GPlS^g>AF_=QS z8x{Un&!2}uhqQ_qh*m|Q^CRu+XI^~#wc&)bL%o5J?Y!A41-D{Pn`=cvYvrjrF@q1m zr?xHJhQdEAp6DO)^Xc1<+~xZ60s<)hhT9D~O_?F9At-l$EEW`B5Eqm1fFK~Nu#c?p@)IR~$q73Y`iV>b=gv2)LyZ9>?echN(~>Bvpf2Go=64HEgA}ekMSr*+cQb(sRM5gK^D*#NgJKaBV!WSaofAf41Wc&u@ zLfyXY7HjvS7iS6Q&92A$ts7r&Ei4$-Ma`AiMCe#CXj54!dM-XgKZA)u0TeS>$btZP zq4krD`|SEG%j(6d^0=!Pue2BTZDawwP`Oy8@G;?mwqvUySRw0C=z)q&&G^sgjuCYj z|81^0-KgF;whP@cq7KS3@JXD-b3;a-<(wL19M$@8*QviW!sc|8wW} zE}=>aZ9w$zoxy$8Pv&i}fBZ>EarcUR3e=Vj=OL z5Q1F_9_@N}OIk41!rEKm-0+Hl3JAapI2Z+wPI#aR1)*CpcboSR{m|UHg$-zT47y`P z9Y$U^WEHB?Rw#Yc?mMCm!8$$pv64YJY##q!EmLD&_f`7phJX0T3f{Xk{0=94rTNj~ z;eYJ>_7KztAkCJ}L09_dxh+G*-4FLpC3w>Gu4H7uxcKpgTs?VQNuu`J>(so_HU%`;MsN zkrDhT&L&eEhdvfzU&%8akuM6~MQmjF?T@SgFv;$8|BVyAERg>f&v!yJ{zVnXtHsc}4Mby4Mdr+u7=$G7M((+1c z3S6?SO~jm>H~W?CsBRG&D;7Nn$_i 
z`?}dNBQH@yvj^=L?u(vhw`!}N9P?W{KV9A+R7lTzkL`SMekYz`wH^1fCAC*_-3|95 z-&Um#>Z_V5cI;=tKeGRP5Gw`i4Vo)k^n~|~y_d2AJHw+2Sl^UArd*BDzMNtjF-I>f zvsRJGCf|U^QIJTy{`M}DQf_BQ=H-W0`2KKdrq^XjzPbk&UpRMk^N7m4d~uR9PmFi$ z%8D^euB_(XqX*n$&^x{o8Y_Y$cxRM-79%7~Ofzkc##BUbiA5AUOskz}$KEC^F~#Dy z73ak@5Dba~o~&!NHLmIYfP<@RNDWWzsGfTM35f>%y0@e=&Rb3Rj?%;qR;Df_rTxPD z2GeaX=4qy43D+7obco8evFExGPVqaZ2ra9AxJ8q3wrg1H$-31T?(NX`wWbU)`wyJf zx&fn7Ly$geK7XuU##L&zDkLb3L6QY;)W&JhQ^n7^j6%Nh^$o-OqFS+{DR{*Dlh}Pq zliD7xMn692>i1Ip&g>)SY|1_91bZe4iNnG|j@K87SM`?$B3?1YMc^1^c5ErduMpy{ zK(=n9Vx;EnKG4APG|7kTrcVwY9Y~DAf1OoxaZ9`86|w#iXV#MaKJYhgw&<{XRi19=mZmIGyyV>zyNk3+OT=uO1F)c|nMMz0A45gRYc!PhM!j z$_>8DfIUvl7)76Z)?^`4O;hr1{zWyKr#EFzi(R21=zPgWm+f=Ty<^?6beBUj5 zcAveAaX7Ucdz41Gln=mkz{*K4H|(bQq>8LP=(~`*c6Re4;=#0o>*HJ7>hAj?i(p+S zVs*AFJ|4K$wZ);crzfC-#>UWwA>`6P$K*5|s*SYahPrR9%|bl4H?x3G>*LXdpysot zrcf(eTU|qt&8&7ANK&Xk$R@7!SYCDt%!hyV85by4`HO7E~0rs;VtI;C@pF-9X zRx`uj#9u@j_Em?X1oW@=Q#djk(EY&?v2{U}zR6Ml53L73(N zn@N`Ql?c$r1R0w&aLjKj=is=~058+jf{oPwX>ovP<&PJkYJ$zhAjA z9qYlXiZ1FZG2i}IWao7FlDR^q)3GlY^yEFF%e~4)oYwo2XoThbaC6?C`<;|=i<*!9 zHF`6Lt+yI|ULba^(5^%Z1!z}wQ;-9Eh) z%xE0oUUU(QF|YSgh-jdHxxpPAlTK;B2<(vtiJL*1E@wzM3?dI(-I|J^zekpaii*`( zql_~jOX(tRJka7lA!TpSDLvh{XT~4rg*!=wVf_}ho$0~(5^C=_F;Z-^eBEw|v9xTjh86t{e)dhaCJz1{>(#P*F%Jv8zpm}>8)nGWx{i^u#f_^FlF zerO#P-(IG{LrvH8{N1n&bu|!-qWg(6A4|r_#+6PlRu-MD9&eOM^1m({_Ou}Sd0VY= zByj;DE$kAv6(?~p+4O2LTXA;ab?!Uk5@d8n5z$ZRAX-PFybtX0I(DNw zqaVlr0h`f*5WCmyM$zW{`FTB4xYt(ul$kic^&P z3>-MyvlNzFQsSi>sAz?&C=dJkY2ff&c(v8z*V?p%SDxSpUFgOFKeaL-HKu zq41t_mCgHEZ;`yCGu1D4T>?Q{D`}p4Wmk_Vz2C3(cAowgYwt|H>rju;%55k5*+8t* zBdQ7dm5HV}{JHwCC0<5bxJh&2+h}=N#?loYdt*1duKp&YkR(z|Fs5T(x@T||dut*x zghF($*cFeUVmOD8*#irUi{nc7o-{P;#2k%l28A{8Sme~bW+zCh_Pt4i;tg(pr>nB{ zpupX(-lF}Q)6{4I7V1oZ4ms>4n@~<5giC-WkFlS^BRklwdjCq5)#SDd`zp4ubyM4s z8V!-RxeQpBBQ@LnXDwr-;}p(^!FXgvuf*XIh@ZXiGRRwl;)&i7thKx3Seg+wE1O-W zs)@uf+~>DLjTp1#n&zG0^U(Qqzk!N5zsiV^K+3P+06f?e-q>P8X zRsu^g%1GNe$K{2LV)}^IVNsq1$3o^IUK|g(qz%(hrbbN%kzOhkywoGcs~G>#l7b%g 
z(0wRAhM{Ys$WwdoKKi64XEida)((}c2qY>jZoD~GG zr7BuQ=w}j9L{^5$jLcLU8j6BdB~K{D<58QQoXgCQ%i^=EFJFFspz|$3q<^4gi`&8_ zExe_~?)jy+B73VXA9>SIe+R&yw4%E%UensWYfmZSOQo8pA zjc;#qzZhtL@A>|Cj>#(<te0p}b)2hqe7nMKBFM7YPURyn(#w$)2^l<7s$D>z}>l z?LKk#c=Q_rRstNuk^n~M%ju&j7ly|yK%SX0-04hOAmzAGIg@=ooUb`TvjqzDHQVk0 zSDf42cIq}ATa4RP4Sz9erpyy6JhH5E#3G7hyz(5!qiqQ~@o@|@Wm>aqXGRNxERqn~ z$~{A!`ueFCakLcdEGYJAb^wvEf8oRAfTe_^%sqmP%nSElzUWGeW}jSH#JklMJIHd7 zM-;EkAu`KCwJXo?@zlf$wMJr!$BcZ{gXI35fR{|&T7NbEQsY)Vw0|fuI1Gtk{7%HoYm4zSQ`M60yh{bwYBg$q)UtE3c zKnxIb1v$K7P5TQdKDd_#KjkiY1Ge(zWi)Ac6Fkq`L#?`?EaP@4iKH*`AlZ{kN7kDJ z2$&0Sqa7iSC6PGPt$mRYbva*c|YYt zZ^|cmT5i~TZjo)>sUExrHq~3e5^v0^yeFc4j{bG~WrDW_&C;zViORU?Qsg|2&H_1) zhyBf30qS5gSO+dXpqIZ}UZ?+x9#iVLrEK>R9su71wxwOyxgP|RS7X;&-d@8s`mpc{ z6{mXF8b^kB5RddITLLlPQ+Wqo39U+2rrImgI9EvU`U-$6mcwC5uQKccFbD`TS*p^E zb7ySR^lmX&gp;Kmtn-#UN!wCLE6*_?r6E^z)<-lBYY~fv*~#mWK}o>WE36FnSQIwf ztXQEXmYY-43buROalE+&+Zl^QJ(Xdc=cc%6$_$|6)T+vEzHeXvBjHHyuc#yvXb4av zpSy_1*E$e#jM9S8NiNAfU=O|9cI#qRiKxQsoP#1w=a!1F_mKrM4U)bzNMrHET6_w4 zkV8XS!{>>oDF8b%OGzn)S?2pSB?Dvp40$}V0U%DK#iwkrNTxMGSu>}L*y&+u%AZfq zPM5MEdQsI<HV|j4iDZuxQ*E0~4hc0_Sl{d`P=TZO_qvBk)l=>K}FT_6fLwRwgAY2$P0TzZ&{F*mbpRI&eqBhQ*7Z$533)Cc}}E3Z&Io<1419`g`t zd6`LC;R*PWf+%MfPR7`!^x$(?xa6pnbkkbe%3`I zU%O`R9e}^v14(ybk9pxuMUUm&WHlewk1%3-4i>wX9!$PUd&S`Wi*RI!82SqbGUoe^Q zA>^+jRMW)`Dw^hd>>nbx8nMoLxPKpMl^c1xzhaI3?N;y}aH%4JJ+x{yM;8^ViPCe+Y>$HE|X>o!*c%d8M? 
z@q=FfnRb5-ocEBHsq^fVUL&Ux_q^fc;tljcTu;?3k`OktRyO2#krt%!?DXF1UK*S{ z+A0l^Bt37x-qSE2?S74;iEWDS-Ap3NV29WUNoCbtIt7JXOl{X6_>>z+DrAM<+LNUA z(6ljawJy4pD5@h)-K-$#%Jl6-HMfBJT(V12!LDf$QX}WDCP<*PamZ>eAI2|ib2Rjd zmgTRMy~F0ZL{nq*j8&6p_S71LMujdZAwCneHeml6t>e#}YiZZK_vDzRYnMrlDNhGC zzQiM(R%B`m+ceji2d21VHHF@|^qgIsuRTh0D#a6ybZ@uAP6#&KX&u%F4L6k;9){iO zhaHS4wnf=1UTO)JBc80q@i%)NdU?Fo=Sf39%ql>v!;4Sf{-~!3mXzP&jAwc>(Yb8b zpIhBiHtTv7jPo$iocw@1?J3|LA0L}N)!^3h%DdoHAqksYHKLVKOZJIA>FxVStePkK zSeNZ6wSk6TwUgYvhlKJLH3mTuz9X}SaTHBX_iJi3uQuN{&(<$G!!`+ldww0y$zDqRo70$?1~lr3yPijl}tJQe%_U@lZO)6Y)3S z$Xw*)?}m=1r|W~BF|he2-y$56SyJ-N&aLM1jF@Xp82`{NOH4Q*r2}x8Z@qNJQ7bau zvRI~+s?_LEp6q@fy1C3vfqg5~yb>j>c8!YH4`1Z60s-M)qRR2+nR=Dp#g$w1B<5^K zI&bU|PhDdkd{!pR8B5Ez*p<9j-S)0pl#zhV-I$G#Z0=cU-1t!Igt9P%H;EnmRH$*@WS@QI?Tq^P13rXIoaT~;V^5L}a#qph~dYi->Ow?r* zc_qE;xQ9&WOUf{tfESl=agw?XCt=C@98-WQ4I}O}#bqXyZRPE(N zty$#{e$68vsM~*8Z-_GfZKuizwRg(H@lMW#9J}x)U&DjtU^fZJXEef8dDu1P*>?lp zzw(`tX%mvrzL$V=M&I9E4m(`-I)zq$d@O*B!agS#^xbuFnHOZB**%YA0>iSZVo>US zILmK*+Q;8VEgC?pS0sgU>b45l^-iA+tB&pOUu-A#2b+Z8Sj%{;5c@6 zZxVkt`Gsuxsr2+P;*)$VkGu{rvdmVNB$d6;!jFN{v8HrHr@`vfRl|EYXjO@ZN#03; zX7+NWRAR?61|AeCTh67k8sVj8+Rv>b$?0fmbnAdGfC_O7npK=0@$WRNIFK@}yt&u< zDJ`~n$93o^3&GdZWkNCxj><7N%pD+UV;9X|i%AIb*Ia%VI)9gwea;43-svu6jr3ab zzA?#5Z>^LL`z$FXSgc8^NmSkSbdxv)3BeoB6TI=t0qDsHZ8*X3&D(F9KaJ{dU1qc0L^$euKcU`auI23q)W+HNF}|BD*$as)`iEWBg3Unek|WK zy2we3?-Hep5mcD#Zn&;$>=7+VSxTkw9^zx0r(Xe=d2jWh0pEd?CvB418~)kVFUXxZ z98VllaCUmym%OE|U%XE>hiZ_$ljCT6-UZF%nt~cqsbumSvga2gY*dw}ebi2e;N6o@ zRam|OAeO9*$v);Bkyk*Sx3NdLS(WUu`ySHU8+)+*U#Hjp zWW0fa%UE~N`z%PyO@a&VS-lSIzoZJ4MtoK((Ee4RPo%}JP>9Nq-Zz6`&e8hPW2 z&>1#xQ1Ko1mwaLKHkWL9C9q@fT`sd-i8ptg+J}=FA*C+^y`b4vZO%q2bzTW(T(HN< zR*Ua`o)1Y@NMN1G3~40ds-zI*rSKji4yxPtu#~O=IMQz@bF)b45mrLv$xrlhRW{?r zy~Daqwm`M-lkZJqp1qQNn`GY;Qom} zgaP0)xUu9HX{df`d7^u;?m%`L5e+M)1RA1hi~?965aih2TEhikbsqQFn-ecc$OCj< ziAq|K-yo=<7*#l%Lux)szYni^3>?tb?zJR;Ne={s)*-S!Jd+U;vOZE@uD9X~*-KH? 
z!%u_eRWb?hTr@uQE}5?@n^)-Gvl9&#qMT!$i+u;pHT~U_srYbuDOxJ~aeP}?v6x#} z_XPqqvsMp2PSWH3-Kn_L`!C?|;~A9a-zLGyH`TBhE4b;can8k2Bl&1~92dqK87z5W z-mI}RKn|oiowJpc_d0e)T${ux+S}ry3U7kHw_Ni5a#CJd4`JZ#RFpON@e*GoroVZk z;eF$>JvZT<3Bpye#-<-Q1S+K(5xTGd1H?mjR?Bqi_lngGXF3v!IO z3n54DJ6U&=iL&6>LhIq(!qHIOli=yGy55IRV8> zX=@#TmrHfEdawNedvIdD`ZVQfO6%DBe3Nm@`0zadJr56h2{)hNoEr4HipQRHMI{vH zE}B#JW-;|kT`ruzbpX!9R@o<(V43w(6t@;>9%+59W^vUHZJGft<@HPJ+7SM2VC;8%?xAa=d7$? z93F%H_YPjpOLnirs@cAZBfV;VU&(RCXW5UHN#?+Qop-QH!4XmkvVi|(n+Rm7b1q#2 zjVR7q9p`TEws}AEG+VlFf#RVKJM>CeI0X<^T!kiJA?tqE+1~sNFO}E87w@^=cAa&y8d*WlJ2Ju5>v|F2eWd z6keVWo?83n8kfF=*puPm#&vEhyxXK_l62|rM_Bt|)X#E`BA|Y74qKN3)}_MdQ#-+? ztjwB^tY}Brz&ruO_dINs^A= z+5-Sxan8>vbEH&?RM7+mWQ$XsH`Sxx=X8}8ZJX(Yxv=!ue!jR~2NJNc<{-0|Y*lc9cfjy2PtIj50a2*=#wgX|r(%ICavG}`k5|FH+2K@Tlrz#553==bb` z;Y|bLqJ{PUv2>MTO@D7+y1TnZ3P^Xu#^@9Qhjd7HgLLN@DLqm`LK+n50n**wEvcyg z{hsT2advHcadxh=``n+n&pBUxHSUH4U6b7^uFrW>K_hEMkgU@@ZH`((V`C*gc4WTJ z_z%4lH9=HvZ0ZYwk3~PtDKc)Xd-nOy1h_Ldl&~_;OKk(Gz zt?gdio>b*$1g&sakSyv7bz~fKBb>DnvIjqrYrq=twaiE}3|kV-i;9}HtT*jz?U|-+ zeX2$oj4V92IbCMCni7Ayeu{ci)s>F`60sO<$;aOXD8a}hXTIt! 
zujo-k!HZGx@qFqD@o-#tBEf9+rGOgb5G6F;%$@l}wd9xCeYV0JM}%HIv}XA|0gBO8 zt=v~7!rWK=kyHrD1>!YZ7HNSfdR)wu#Dck1$%Cp_k?8aOmPb1c?e7N`TObS=BqW}i zTC|=>2+&cp>n|j^Jo|55kqBmT+4Y{C4u=+nByGoyP=}5UZ2bpRd7iKb?IS)iG681| zxW$k>Z&;>)X&uvL&<&w&0cK`uC#W?tRvp!a=yuHG7rXy6-^7&jj**B> zGm?Op$&4!v8!rqGz^7)l_^4MDbbe7Kl8E*d;{a)$zu>B+c`x7suf4vhk?qHIZll2Q z?|pM-#*jrewbEj_@*gn@>)Cwz(Pbk>e5G)Q&sHlhI?Gbt_UyU?8qc5^@s+^D#YYCm zcO)OUg~4?deA}y$YTn3?8)jRi_)$`W8mO>O<(i1Ba4rZlpCGf~A7$N)Zqq@zEF^pD zl{>?6r_zy+0+aYvC{lboQ&!_Q+uA_-=& zum$l}YtN}*p<(~NBbfVcNFu2-CNEMW7bZkaSWP=e=|)p4&22rOoQFB>MR-)NU>L+# zMBC;_ZQ1TX^^qM{Jrx8b>OodJa#BwS)shPulTvYqgXb0+Pk*i`2}Wns4j#n`ShLmOwGIx_Pi(D9iQQZ8o8*6Yk4sqs zO)nw)UXELSW6h?ctTLRcNXbA0D24kw@)OWt}EGN<56@YVj*e3Jw z^U2``eM!xyrD$Hkq4I;$o$bGe*;My(BnF&YF8RRDh7(}7Cb8>#m`;e~3^eVmsOkW0 z?He2JwkqD~EPZ(~!#ZA21xjv)hDw$d+m|G?yR2COM}AJrA+Y982l`=fb+bi~;i_V1 zefpbm#;Wg-h(U^=ScU`4%>@!@dDeG#?Gd!EB#8fJ#TsxR!wU-KE$f~@F-fw~h)Bhk z+H#?TTPq1NtU5nI5QFa;1~hyd2cal<>7=o3BZce}*f@sG#N21`#MP=8PM{G+D&;-O zZ|ZpIwE*%t->LgFxicwHr_Q$KZj5r=npNp2m*kez9Y!k%q*fS|(^B&m zWl=EQC+EBSgi2$`?e_1fxM<@cWkD>>7!6{X54$A|QQT@c5t}hUkmdU|^yr;st7etL zNEYYH9Fe z$q3Kw`HVWzWL2+wa)`II3+_W5GNmYgKuT}6y+Er#aTZ+d#mJ6p?Aa!p~v)Dus}CAf~>w~{(D>Gmu~ zBg{ZmPl&t6c>k)9J!?y9UMQ!d;1ac)5ZV^3Wzw43`r zXIMxHSG05po(}{g2h3*DgB`=aqM&k5(0}GU=zP0XQ|*VhN8eq9+c!d|AN%T2~71 zV!V-mFJ-Brws;$RJ?lZ&I{C_xk)9=_A-IWE377oU5-I?8G0kVU$BL*RZUTGs{>j$U z-{sP(cejnUPfj@VE7mDXd_KDfpH;)?lBbL;h-CY^Dqa8C2$gK8AHd_Md z=-rJ+TaH(E1yqU7d5b^>4wn z-KqH~O)r+EVQ)#3Y=|$}urxGBfK6H~W$6j^b`-QArP2r*hQ{d>5zZs75H^NstmI+m z&ce*0@gmtCvG46i*7duk#4gZnVg^A$$7#Vhv60t_DYd2UZ4A+UtF$1H*x+dEi};E} zIA4YJN9!UcCNt)dUF(kT7z+zd$^8jt$u(DtP9MRxsZ`FgJVxgnPK^szksfnK&L#5B zSBqBpx0Z_@P2!}xa5}&B3Q#>OxYm~Cvt#S@XRv8rClVHtO< z7vaWzO9Kr$1B9Q?`G$7@d9G%plp|7Q&L%A>4`)cvD?Q8rJAQ!lx|azKAM%g_<4RCd z@j6bBy5Clr>}G!pdAjT3Tz!p;GAz{YULgi*BDNGbGZvF*OMxo&s{O=)l~vgGKZZ;V zuqEk1D_OXn1V^~48L?f%HV(cs(q={ftP-J2R4E`NFd1X(L2Z~4AoU?1@8j5r$~;h6 zA}`f8*~JinS37Y9Q2qhGLe*^WzgN5-Uhrk|VbF-K&A>n8m9?_?(o18*&`n-p#{wP= 
zBv`C{QedJFS%PYs2%Qx~hosLKaPir)5vtG*bFx1Wnq>N8{_!-kTTn6OHp2X4>rarT4C`p=C6d~zReK# z9R;H5#tv~9K(xv%4YkXV`lO*U)x`tIE;H@jN9&@BrV8PZme^ z%}J|BL4LVmVj$hn14faynkz>U=I{(naaSO-VZtK^)7yBruYpMdD^XdAw?%{mQDqO8 z1-Ll<2Pl)Ib_7-ph_E3+mQ0Z0c4>&(f)x)=n=1&MCBOqQO-_|Nl9Izc=cGaAu5F#o z$D>`H`F{x}*m5_90WMTokD3656HSrcoD`ReP+yK}?_3s}m1!S1JSv^K+Jcg)dQE>P zC&f{h{A+TuEN|gdb+}i=zDK!&^$>{3i-B$(PRy`=g*99v@^{W-&eQpNGn|bn&vzS}{UF$*5yk87R`dutInKHiyQl*qqPr@D=n$!)2s)i>= zmI-Gu!~9JqN2p;;-Ix!hcHh^-MQ~TqxXFt=2CT`VY0(mnm+X!YRl$p_H`oEawT07q zeO0>3-_Q7tyFzp76A;9ca{$1y*z{3Uu< zETiZx%-VS5Ca*+P_S16CD4F8xmYp?-p^7^{>P8n>$n>rMHbDTty&YtB&n&4N02H{I z(Fr+&nkdb{eEtXx`e98;E((;cM*o5D4|lYxCf}>7GB*VxeaI=Abn-kUc65cmSq6U_ zR%Xw(qU6=YO(W*OQ0Luxo*O`wtNsN`G%O-ac0*fs2H)lOrk}vwL8|JN8uYVzj#~ac z(^eIRAy=psB>M664iga-L|rNAN_T28ULe)`vB@ynUN~fCE zA-$HEJ{tCF@$dmb_PeV5msHS{AE==Ml$<7>0@R#iAxl0U`e3Q|3oP1J74y_IhrTk|wj&o^QJEY>CJIhSm8G#4m~}4orW!fGi1XKxX;Npx~8U z>P!{Tu%Z}t*P(fa9}C!3@766wt=7ushMNfbUM@Txk}cQgZPxiuwg_{v{3Y=`vOOzo z&KX!M5oYD2C4Tu>H=`=H#;3g7{x)V{zDl;BPIZu8(!u_1+eqT!aSWSZ$ElWaqSM|lu+ewfYDr0Y%P7znyH`YF+ z#N=yce3l%wvuPtilqh{x=1VuELp4%GQl2wR0za+E z(&&;Iw%jpFm`osih4cYlvl0{%I^Sgp)VIPUR&;n^JrcP$3Mq--NwGw;nqUT2(FN(Z z!m#{3J?u;+1a~9HB$zge-uc~CsU3?_c@7WdnMth;EH3k;S}`~QaVEU26u5p ztE``NR&#N`X zCeLLzEb-R*Je%#w;dFRTGTdk&wS|`{4&xDr_D3=ZII+ySJ9c4k7iY5k>j@4!7T1n_6cD88#~)sEAVpp|G|qEm{+gJNu#bQtg&ZdB#^ z4(fLeu}Kx5=yTwy|CA=Vdu$JucLiJkqi+o;+qK%JXv&y0LyeEf^^>p*a0g2=^sK~3 znO_gGJm=N#IDD(a=+a~FSn3gH{WO&BzLA`A97gMwwM6>rEM6vqo`PMP?GtI#5n&U_ z>o`E!3tBySWny)?=rAcR+2#7>1n=Ehu~JMR_N*MpP?TsY+$Gtbe6a?P_zgB*T2fU; z-jr)P$6QI{7wnoBI?*vC>=fga{ycn8iG%{&*Vdn_HE#H#CTRUG7tzO>f7sO0aCAD3 z9jCL=t+9Kh)_-_hlD<3)*F9`#pdPfyJJ$Hy%dJjS_kqv0-Z4~1Q_9b2YY24xBJ;2o}Yg%eh7Uq zG#(bqj`dX!<9;L@L{E{dwke`b#Gj0A8Xjv^nP1!Q^+?aa*er2}Odz1+COOae0bEb8 zSE~s3Et~ItT%i2W)A$@LrYO6xqzokiw$7d(ku)zeMD6xj`5ZXs=tfmci(vv%Ke=*3 z&J7VP9`u}ab{Ny6v0S)yf_g~Y=Y*F*tj@SjgW(%e zrtK}%VmS66H5R0n@}qxDro};Z!r@A+JUq2;M4_0;abA~>jQK&?T%$dBun}Y%?a^35 
zcDiVHVn3$BKua`ZB$$l#6NkPi0)maeQ|HZtY68wZL|!kPMFXd$6D#-;dmVmb{G+9M3@eqlQg%(Rykl3aY(gigb>)Ps6UlhjOx5 zI!TrCL*snaqn1ldU7WrSU4snORk#+GK;Bs@B7!{CxhaVxK5g6m=~K$S#yVCaK8Fj_ zhF}$h`%;|i6F+jJv{pN+;h-2J&Z>5KcaBEL%8aSNrBzAIHXz_W85!~xlGHi!21Rl z#=cms7c-yl7lVR70e7XiGgyg-m zzewmd3KMYXF?6Dl6<}h846(hZjeb~iGe1mB{hEs7r(w^eH1v3B z84qVmWu&;gr)K}HXPr|YE{-J$kHPE z7?YHyge_&kopP<`Kr3qW^KH_t-s1JN8E!CA0V8k18kR}E{H^iWv-Ii5EOGENwG9hY zkyRz3vUL!ib1nE?LWO@Ogr~e#7bh(moD6q8#kJRqbBtk6q2X4w?O3Z*j8n6!iM~Z*DQj zUYaGrXjMZt$m?q#OFISxx$mmzIn1;W6pSA+mW1`2Ff9cF|Cx9qeS!5#V$Kk`ZGYv& z1mtr2^^uWZUFf$c!lMVy<0!SGE|dNVmg{L0TRZw4$A z=57sZOZ_S`n+fVCno{i>ZUOx`sX-Dd61)twdV-9l`r*DkdOds0k^2m11P_|RwSNYY zHsmaGfn)Q?fDaldB_h<)$AL9u$kMgcsg%4xQNLzg*CF^CA<4?Hdcn@E~AQgnUne-TIOvIRNfvN<@Y%uRC-@z!hdA7?DK7Ca-+z!FIjI z_N9-~6DJ<0E*v0F+F1Ajzv(Vt@%Pz`3whuPP)R4Yq8A~~ z%i#{V<;wRUU-D};67Vv0)k;!VO5J64zahXI#we-m({HwOcb!*5f)Cb&NKwOa?PJVf zoKrx;Z$$QGLODc0VY|WK>2Ds?9XT*FUlJg7W8R22}C#EuAi2y_=8KIla@@ z;O(U9TYidoxPz1+k25bnLYj+th?0CU3Qx0~@fC7*rar8Jv8xd7GR@W|8!qm}<~xnI zy*Z;<s%JV3AxBE=5B|BJo;~0ZaXPlQqHAmmlMk+I;isC(G`rp{T&a5lHnlO%s2r8ot?89@6MO37TT?IP}&JIIVhc>`Txvmnax$G6zd^Us)z8=HRh0&UY(OWyi%)6|S5@Xy< zwDyu%)C}<_z(8c4B&cLyy!J$bzxD~P{g{NAU^bFyOht~<4y!J{Qwshv%iel;rKg;g zF;2rSzss0L7Y{QGOlz0RrO$D^;W=QDkW?ii&BFBv@uVELvY~~VEZglL4ui89Z>T$l z#ZQfsU;@xc^4hiC9`3~SKKeIZ$BJqMp2l(ML7J7uN#v}!@7{?ib=sd=*|-5nX9x}j zJyTaD64TLYGa?#3fi;z;((Olk-bJ&s=-|SjocqlT)>zIjsp6GeNMi@qk?DOON$DQ?#ZOmsh4&XT$*Ucwh$sP1f*Nh%4xl)4aZv&@ivQZDMb1BZua34bwYc z^T<`B68w>HWT~g`fGPL;A7|+kEVQi5$Z9-JN8Zfvc^9!QdbGC$3#+j|fxb`bhOB`h zozW+3QZ1F&oYE~d@7*hX!%1t}h|}5)x`YNl7m`-_Nwq8{dKVLF5kwFE)prmY3?1gB zkidiGcOelU7HB8|H7O(4PfNy?z{}))vjliyr}771kn4%us+>4-93_z9wWq}is3RwK z$@a#k16$jev|6LJ%oX+$X&j9{Bn}i4CX1}^= z$8EAFv1DWQ=8Mw}{~mu^PRe_vT3xZ>DDup!c-m7f;j0&EvwSWrUut{;YDG%-J{hyT zRC|T4&x}p!<|*+)7DUW<^c$F`6U(9GnHY&f&pt~rfvtvMjfb~O3i7OP`_LB#a+&P4 zgTdoAG%WJUKauHHi7-20BZNhl_5;rwK*e`Uum9Jpgdh7zzY#mUKoW{->7eqWGn4h) zUO|?B@=k{fFA*N}+uxM(IZ2s|a|^pHaIY?=FJ^+G`Q*tWzC=7Tk1<#OyOfZ9Eo(+% 
z?$tTHONF*#Vf4BvXZ%xAEw7=UjO4#=_Mk160N?Z;-UU;oLj85__L#IvL%)-~g#fUw z->2=y*fr$09oxT>0_poL8G1)}+SW#j!s&T!wTAK2AbVw=@rnXl8`kC{ED@RxuZG@P@Op^JLgQu9e(8*(+mZ&inc;6u_`gGmVg z`;WS~i)gjqLMTPu8gRF)_$k_Rn6FZBO6fZLYiH#7_5HNHiw@4-1EJ6emAo{ zkKj(Nn{?IWpZ<+@K-#aOWNZdkOND>PE5iO@`&3R;mjJ4yr!Xs0Y?Xa`bcL?i5Kf_UQTQi@Qd)>5gr7MAb+NmkNbsR*aF3_Q!kcfRz z6b?DiYk?hTs)Pf`gVeWsI(`YeFGXCIoQ8?zjGRr&sE5{dLSkc2+r~M%z~LoQ)MW>l z|CufcN@+l24$QO)+i&&MUn(g)<h;!eu-kT)J4jx$Emwqvrw;%r8 z@Cww&o@w?KO_#H6LZ0%tI(~=w_Q+{jN+d*)H=lTRYa}O zj6{(FvMLU$OE@!8d_&bRa}G6)3juLR!5O;5@xsgJu`~a`KnsZ;K@29Nx*lzQ!%9c%y`U%(`3XMbLHH*JcBw+%- zqy(eC(7VQdQ9R?rI^r2=?ph?S)&ZWC%y67}($g-pfjINyEHbQ-#KFq`0JsAT`{Q9i z`S*mI1@bQJ`W53P=90T@PoglA6EDN#P^{TUol>`&}_)cI0&sF23e6*7z(e+e6 zhYufjX&3KJ^?Xv#&u2Rwe-WSYK=EmyeT~?NwOC0mjT;oejE~~ zmr;nTH(z`V-FxZ=FCR~PV5SbR_d_d+2OJ(K12lxN!t?8F0jg%he>*+%nNR)s)c_$; zpA3?|R2C@K)0kmAkU9ZxK=aq=AG_`9QSUP!6s;U0EX;G6)ys%lgor7tE!SSa;I~jt7P$961MGMh@l!)AVSqa8aV-*^eP|q6!(WN zrQ$M@+netaU0L@-wn3|7k|%^=Th9cWE|=#$0RbI5U*tue{*sh#X=l|>)Ed-A3^H`#{96$*z3xRV{+l$*L$)IsDsG1T(l(|@@38kQ)65BRibwRqYFfa z;Y^j6Kcd(3W}v+tTfjpwkc^qXBQ6s3x0~8&HnY%xjLKkEvZIuTUruSJTA22dt7em>nrUgu$(SZXY|TCisX9J*z&!b)`<-Gj*(TY;_Gxd z;N37}_&<;}i`bc!Odl9RS-`r}9k8S@k%-a|H7KnXNL#T+r|En`(%?QS-i zPShm?BkJ-u^R!3Gf-(~UBNgY{jJvFCps7yk{S^a|Wn5`-m$UT0@MHR@U&x1g?Fr#z z)9>pkpK~v)U~fD%2eNXH$WD_&1fNL)b&v~Oe2k-2t;Ye7ttIOE5kEUwTFqC zkAsM&IG<|xtX3dB8Y`^o9zPV#y!~B{hJl>#(O>hd{V=;BX&v>gkdb&3JY0siv%Mw(ROj3WTy4Ta_z_87J7$&z(B^mgB z**?N2=vC~aXC(W}Y%ckAO!AyMJW*EmzGn`MzTy`d9tbu4SZzkchM^N5r|cVc^@uYv z6S$@0DRltDCtE0l>ESg8beoYc1hzaSHqS(6EvsLd6&U6KM*x!14ELelLN(}2lWlD zu52?36k>m+3%g@3R!VPBFr{9TVPUksKM4JLetcVyukQX`m}cA zSPlNKW#@-k4&HQAx^s{HvrvO}8@8oW!oPpZm%#1#^DtbEjIO`SW3 zkqKJ~$qMZAto$`cdN`l<8L_DEdrNIqFtHbD5nf9B8ZuE4`la!cL8}v@YQ?V2DhC~< zvTtZ)^;^mT-`NA-dP(@zv6(9 zLRGaU0|r}KXz3;s64RKEoO`|G6sD@JTZZY0y*ran6Fdu&i+4?|$|Dsl8dD!gs7@FE zc;9f;afoGIt>4!RbLZ=DOIgj8x)b7RLU@TeaeQx0#i9_ZokJt-WdK4)gmwow&1)i* zKs`!Ri&fV%jfmJQhDMaollZ;s^3Bib1zssJ)0CH%rEDW2(|gf+8+*~ZTdbD7iRPNb 
zOvk4iDeoyqva2M2O&`q1`6G*Jn#>DiI5l0uHJ!X*dV`G7kJ}%dZO8SE%A)Qy>SeXkA2)0 zJjF;%dWrwUq?g32pAg#m?eEp?m_d=fH5);ASU)BK<2GX9$RX zsMwROD7wHHB!lsiDmOw^L@Ohachj1IL2ETC`0fAU3!$Uj zbZvZ;?VJw)T@9&;JhMVsfc%Z=-mRfH5J2q-f)5?CgQBlA_(v6e5Unh!De^njbNBkw z!T1V%!Z&?t7S89NT5og@Z1^mtG}t4g!Yu~+3av#^Rm8tpVT?X6txlq$Z5!or)STia z`!s8+sx#&PXpRN|16 z{Gr1u$M*Q0ul6uB7ZoPIz)}Z(w0qctVyQy}a~vmELixVzOPc%Vt-GdoiIyF~y%5uMGc6)yc&i%;W1l`-kX7tRkRB8K0z-=7!R zUqY~8?|U6fG|_aqlQXKh%qU(e^5%8AEVxir%gL@lDhZ8BvURqUUa3ML+H(tDSttDq z+F7oodTMvefx-qP6my9gA2UvaqfQJC=ZG^6i|X1=V3*E@Om6Ain@EAT8NdYClvKSf znWPja1sXJnYRv+rrYOH;oe%kNJ{b%x^P7+`x|>0?DYS61W4-C;{jy5A!HQBQ#ubVp z7<$&b2P(6waf&|L1X_ytrCxz-n1i;$rkD*Pi?}9mbj2tgUAvX-*}tHQkaXfN7>%|s z)0y?pE!DD5AhgaZQ|2nE+4@1d!ttY5Wl#tJ%*hKsKh77 zveo6D>V+g|DCd^TU`uYffo>CFR|0eptc-AMN3_=AB)kFEG#Gg z^L+cKC}=|JOe!ye%aA}^B-!_S>3cTZfWEgPKSG@qW`7s!*lR3YqGZ*3rE|@N zbA6Hhwfyi}JX~W@jdhjw8ML>@fbRD=*f3Jt zlMcAZo)~a}UTN`PaDTr|fL4EY@H{w03YjAnAMIUbUD366L^n-9n_K6V#*8=rO2Te;8N+Jutmy!{ErJ=JT#cs~L#CUyt@8HWD;YF^ScIu) zPYVP<-~;%oGGE*$u{&drs_n{sB8*_vg{l4!2pwD&#rvRPA}0(?IY)d*xyiL%^5Nz4 zatyW;TzeePIJ5-LqCD(~{E6epJCG`6=ZHAFEU#k^CMj@@sCcp3dB#&}WZd}!c%zGj zIzA1wODDCY3@~TYN(QBCcYK=wj&XztT)1`Ug2BPvH!j@sWSyyOfU~j zTF&HpC&* z{BS!0VdWRk5~*@x-C=W3xZdb_%wZjzNJc2jDWQB4c9a2dDM){N#fi0ytuQT|Xp}C_ zVOhN_j3*!Whd^!;B|hnHmIJBCWf>6Msvm4LoNXYkFZBklH}hLFn+F&f=T}h@Qvvt% zvbS+*%=h%``P5I?jsJ-kK%8usvxn~RD>aSD86LZgXDh-}w$f+c*3x_Wo>&EwEXiUh zEOD+N|LDg9>c@k{f( z##K6;x4%AdrTAj*wfGbBt6u2GZukZ41Jf%sWY<1eyoz!Tg`LW+9I4V>zzChxR?T0w zWPwEetoCXGgV!T)Hhu{yOTLphyn4wqh>ZMSqldfSOe>FWH61_(4+b9!;3;eq}o*s&eEYdJPM#1oZ9M=*0Ov2$tPSJ0}pSl$% z6+jj#?lTt?kL5Rc0|Q*Bz_%<3PDikJw9O`Rl4AV@JgL8}C25Ks2;GdMIsN!e`2tlu zw?F)2ZL!TR>w9~w9mKNuHd5E|=CX>FlJ8c61Au+yZ&aK%J}EhO@2seF`bm<}84w?9 zyPmHLwD~ym8aVFwnrLUJ`K64A6E?haOp2w|s1EHHk|4c9 z8%_k{fFq-*j_8~G*H-@K)`Fz4ICz^O8T(knDVNE?T>G$9d%AndhCeu;-u>0z#zcL5 zV=Q+es*gBc4KP)=7m8mdwkP0aN34kxZiqxUbN#@&K8E|8#dCJm3+;CA zTnUHUUfvXu4Y> zidbWQ6Rzgs1qZB(_E@FyO=*;zK=4p>nX-iB4^514S@*mssgCT~EK(5$(e^JgWfHli 
zDIAA71q6v^hazM7wE7XB-~py!l9zNQ!)_+rX@^q1?>Dfx(>SqzSt0D zBtDO2AUds4DjhFj@0QI{D#Y*i5wMfrUH%nv$lGXdHMiypUGlpBRuG44H7S8^wwX@L z(UwwWt4*M%c(z>FlMRiN+WJ*nZf`y~2f?=G!?Bgu@Su>xopX3myZXDwI$gBx59q)% z9rKoy_QUm}{xfNPRYJpfJsfq9BF5^37XusXZcOQ>iuuL)vzF<#NM=$2ok7}%sAmKF zxhd}{AJ?~k3&ej@XwW?kTi1j0YDW06U)*q%mvRNCj+-Nm+(HU+l+bVBBvKu-3AIAW@|+KlrVif$)oE3)>78~F(*$%n= zBoUohmP51`nwqFmC|f0-ErTw|JE4GnI`Ow`-7B_zk8NCQVO($wNC^abSc5;9-C!G9 zSPw%N_E^TMPN{KNbwp+C_Skh4#Fx;KhXzqVq@!OOWB|p`^an+A*DPr2EAEHo>hTZ~ ziXr4{mc0P4SQ`55rVMFroQB-Y+Ve)+kue}skm13kK+nQj>S8@-$$04hoR@3NCR)a3 zkIVR<5B-mC5Yu=K@^c^DS{ZWLBU*++v>OE}@-?kd!JI6sz_`>5rOD9$7;FexK)4nl zjz925N<2M`j1B(4p8V0IG=7=J3y3JKP2#RzUam@5=61!lJ5U)_t&&9&DsC(qdw>nZ zf^2z2wjMPx(`&DghS@`i4~3IV<)I(z?J=AA%*D?T01i)(eA|06MEem~MZyC~Fyk~{ zA&IL{kFgVxSkNVmSvz8l>;bOEt&(*Pd7Bud)x$yh2a>IW6usy)3 zyV?ly|0`6KC-s~yw{P^AjgBTxTBwUT!p5O8 zoM9DtSeNGdXFnf4mMz#M)^1ce$rmQV4grk^XLmSO2fTW*y1$*%URw}|J}fiU^yUe7 z&>7f#h^j)aLls|QpZfsI(iHgs4xLO4TE||~V@*QTbk&Z@yF`)dmV(MZD$^nrft{;F zm3*(Hc=dwza3a59W=78bhm*f(KDW-A>qvgyy?@GC+dI;O zg1#0tn7gzTH{~nXX4M`^1i!k_uMs8A6)~P`|J*Vy%;(?|T#c*=_)Nkn2l`rYwqK2{ zXFsVo_CDe-92v@1Hn=o-5btlb{q=AFw!-|Cta?_eEX9T;jS2$78|Bx4Q786-BK>vQ zI7y{bEK#Ci3FCP-nRuv)q6|hf=p3jluDZ6|e=WSK@eQe!uqdv{qgZ}SXjARHKVc61 zBTL&i2dv%Y1}h_r9Fm z)YnVB-|gb$8`RScOZi%!^%7VzLT4IZ8l{o2Q#>RiNbX$<=S)%2osurCEbuK#O^ZK) z4IP#_`fE@h^-?R)EK|K#!P2+n4^VbNrQ#0g zR6)ID%%+;nqtMnst-aVLxFfS4|*`VGb&*i9x=Cp1pU&EWi1! 
z_Co#~?$W1G=Von5z?fwS!|UD`b$}(M!XlcZ?{mo7@z1KT14#AXd1ZMH!0jT(Bb?sj zG6><-mvDa>LC!GV*&;=2rLS&^@7Gfj4Z7`-Ao|4e>S>!nB>(m4-qu83=FjsTlli}2v*zFa?q8aJ__JSmxcKkxdROP$ z-#@3XKKxyr{P%1B_;=;s4^IcHfBbYdS3mqbmH&5HxgPpU{?Ea&P7%_%b$R%sjl8b= zp6TDE{6D?FW7iIwGJhVApN1Fz{jHS$`|u(3=lT8bi~VoGfxj-gF8`kUeF*Xx>vvvx z2aqRZYPo)njPa>4mV7LK^{33V`ej1l*tb+!Wnb49#NIV%~@cPQ{Ix)1=b+ydE??eN5em(x*9EWT^^@-`u3;ia|YLEP4AD@kU z%_Mq@#RCg91O!X>Mh*-^uYS4VF*uo((wv7Y$4C`#Dl2vK?p@lGdUoE;idmb-@{#E7 z#KziPuE#bzCn(tf^4-KTGZE-^TD8ku1)$g9KuA>zvY!SHz0)U)tFRD9n}CGkOPymo&fzbazex)Oemzdvnp(;H1)Q~F z8ZS^_e&;V#{LV3r11Ete3+aY6T-g~9Hjt{isMCpe2RxTJ_xkI3)S`O}5l(6WH`L6R zI1!Tw<*;u1+%+lvh|bpp;$=yPC!0iMTv}(W#r;w@Tq=ooEu#SkoG6FL>Rm*79RjfA zk={8E?+>tZd%}UN^|9sqJ6a6WfW1C2aT+hk9B|6 z3n?o{rTVTf*MnkCux1yiDX=wm7aVEMpQs1kdAG zIACt-ZKCMQho6VtDzk@Ty|9 z@>xyd7~cj_h83#gx2jXQrM7X#2@PN_5=}tM5(uK$R$Ti3DgsF7+6% zg3pI^v31j0kw%#^5!R~aScce4migB=hLf**4}=xdVzCSwo-_?crlPbscTy7v>KV^5 zs-FPTeK$bgi1?BK*CKY`>tdQy#j}m@um@FF6Mn)JbvH9s^`ED@d zvO$L0b!Har&dcz##eXJLX1fG9OMXVW7t&PmN~%z^iH%sZ3#f+&PJ;XTvN<928+wqt z1?SwdpS2YB|LA(lpg5c73lwJ;f-N50-EFbp?!n#N10)1zaVNM3m*DR1?iL`p6Wk@- zeSiNCx9V2i54%-cwKerTJw4s$oSB}+g_Y*t(&zhA|7|yU7`jcB7{%Ew(CQJ4pjP_P z3G%#J&wHI7L%)abwa0ZBF3u$?P+dafk`6N_6N98KH!2l5^P?f%XFl9iEeBFl|L!L> zq8o{ck&&qg3O0!YLNFaT$nubrBbxOU=YqC$oV-*uvhmi;*)a4c_`NW7;CLZ62vCVL zNIB{@m(;QMIUx?n#k?ZR{yq0d3ep0;QPhcgv*_h{8CB?_H86=GaKE1^_>!3KQ+XZh zA;0Tlh!)pLC1m6tWT<}jRpd+}d&`q#1_?(CH4Yz^aXc5AQfDfP^#gM7-F9mY5 zx^*)Hr4*>R2AKlCAB~l%5p`{L<66Uu;^IS2vObFFec)@Kh*`FZDu%C`ZZ094^sq1p z8?-tJ?+%w5p2j9)t=cGPoJFebocGbA{%~hw{;OFLOGPY~e?T`})y$TgS*8z_c`5NXmJBhC+(=ABL0o)rUq=^$^s>5P#o50AK-a&6y4ymZ1I0nWPj%7p- z)!<*-v~kCJ=^d5@fYz#_+??_eW~@L;1qAXfN_ydJ=3TWI1|!iE(kl1x0g4@_Xx3F?OV{i}h_Qs4Z8icMe&+vHrwATMkI_*?m7B6rSux z{FSXd`*K;O{TD`1fyRVmlNxv$-L@r_2R$l2!sHfzA-nlL7;Rrr`;2<~=}_8~flIM) zDpGwAr&t>10=dWi$G)ResgKrQktT}|B>Kq(>jh2=m^$(n?}w6ZQz5r`V^~VJ|9gr5 zZ|508X_*j+H4F^p`~Pp}8Q%ZvJfm;F#*X7hb|ym5Ih^7%9s;&>NV=gp z(QnO$WD=$sruieMo2U6_`o(%D0xno~_cqZOju%aw<2qBwiR!s7GF_tv1X}&|^~WD3 
zX(nWlBaV5N4CEU9?gJA~h{O^P+bW+fDZ;6uO1_Q)+FmtL*K&?hlKK&X=5*LwMUoi? zj{cu7Hz%@c0tA) zSt)hMT+cIatQ?Oa~i|F5yq|2#4)`y$~K zoUmbRnNQY3_4%Eo9@{k8y`4tWaFNMDQSt0Ks53VE>iI4^UltC07^Y9}cS5un2E+u* zliyJHbieLyzC*!g&ima@X!oY>*F*o;Zjrac?$@Qa`x5`xpZ;%~@8SM$4^9tjKmWb` zysDn`KW*-QJ9d75AbWp}d%qjrd_U|)^MCzY?ffiO@`}3*szt?80FL1K{EABKWF>1aM0$g^6`K(+g)8=G;mu{KYt(%06ENcGjcG~$> zRE(icq-n~3o}>xB?+P|7$;JHv*0a6bh^0wHJjtb*M|_`I8;|zC1{9IL@jvn=R=s~Z z&WS@fhUDF>IC&ASV?!+5H(_wrlU(_+?Ya|nTP8}=Jsb?BZ@-BHx!y$;^a&qM!++!N zQcAW=<{mmX>W}KL=N0YBkGv_HGlhO~c};eGHR4Q;!}1vUQ+|N~dCpSf_=niPv}wI^ zgJrp*M?wTysE*0azX+>*=3!rSj_ZSrM`sX9Pm}PDv5PbG&&+8Foq9;_e>nX%RzrAe zmgdE6GfIe*dnfk%Dn{P>>d+(P89Oj{82#CAaOYve^5#kQ@-&U(*@n~sVc+R|r5u?^ ztdH%9BWE^ApoWlp9+edJq4btAX%3Yo9iB+7`>P38I0-@LFMf6y$Av(wVr}H1iz4)B z+EJ2~=+BTr+MRX;?@yHI6OU2^Ma$t>UZY+vv^Vlu31{ERIFU%UGIV zw(V=jA)(R6)UW3=Q+m>8+3Y*gB7lO}a@d=#RkdNBf+X#2-LCQSL#n>=JNafKz-Ox% za?=#2&Y2{EDnf2`R8sh1Df%Kjz27K~Rcs4ilb>!H1+v#{>_jBxhFHAR_wh`V^n=x$ zcuYuNcg8Be<0b1~F}<)h=fnY;@B6~AO)WV8NGpz3WS{++vXC2kflK>WgWdLSlC<(n zqS_Yn7Q}K)>ma%B=fIY>7ESn?7Cn>w$ME4Xcs3WrPE>o7$ZmFc)#pIvlm*{LV7>pb zAc2dX_l@IabAR&&>&B7Ooes{UuaXaD@a5=NL@{?NN08NK=#ZnjH2t09x{@KUjD~Wv z2VJofm(5ltQu1x_$r_-bD~A1K?IU{}7WVt-W0PjrB=oA zDOH&k^0r>-VqDELEv`|jV=U*zRhd+bhf7Oy7F>WU0 zziO`wWfDn`Wqjg@8_X9NLtj6BGf1eFmd$-4i+I5ebm*vK zd`dsNu^1nVYhj7Fur~H#nl8MX@x*fHw9zo3&EEJnHrGLRE>3l@9qRhZd`FFfe4p{7 zoo@e$@-3lrqbOxJ58kG)ys>VOSFTrj>njmjjWf{x!N6m#;o06V$}}DAH-(=9L(;<} z^i#&Nz0~#unNu@W>2V6R<@7DA4WcvxQXKdlu~0`o`(Nr%{0yNix@QJH^KO#I5c|wm z{5H{hThqgR;Dg*DD;4B0Elu>dI`yD|H}#U>n~Ln7JRolZ-6^VR2zgCe&MeokdYoO2 z0Kn_Nsb(KM`7UAXuFX5=B&Q{_di5}R%~AlaL;VU)^qn9}b_=!rX!~qpRe+huM0;Lk zkR5VjC0oauO$DWex|EU~HLg<88CH(gi&g)~jBZ@j5= zJi$!OAsXJz7s08i7LK&Ud?vvXX`re(T{KlX9jx4Q!SXheO=h_k~4XqNw^Kzhce$pd+`Sk3uQkW`o2xJJd(%jJOnG z->og%nlbWS{mcbbMakn`JXy49^8&r(h018>aLz8%b0hp*ENE(l#y0?WLCZ|i9Wa;Tt4aszW_#`I;za2nJ)8OtgdetwUThxZoZNk-ll3RQq z@g%LVZmeG-Rm^q)l8<5=Z#0Ao=;-688@-JHwB#Cg+{&@tcDCB}8gnUE5#W$lkOJWP)Taer??<*#n@2dO 
zk$mgInjj_)jLit_QtqV3(~#N#Me@1K;9a7r9c;N=I!F~Y>z}f{2VysAJ^SIQt8t#E z6aHh}rT6cgFJtFe}1<- z6+t1(9-FunmWfqnM$O-08K~v3DlmbpgF%Mv4sK+L0Iy$6?n+ z$d@QpL1cds6_q=Tr|_LmnqQh>7l7N%aus6DZPmINoB<5G--bgHxXo3Lb<|~af`%2` zRMnP>&-&+k1VzfRs`g5QjZo)JMJ!)r(LO5rvZkPf$UJ}!!rWaYWAbyRq~4EJpoxkns=t1qP_$@L{8TUk8PfjGO~uYH4Bn&==X|i!WnTd-(`!xrpXD6N2U4?bXR@h@|%>C5~b^r*WFi6V+wG$58o{V2OdukRPD+8*=wOje6?Fv@|X z0q?m!Eqrd61(SR!>HbscyRfE7Q*%MXKQ%USuy#dyrW+b02LTPvRGR<1f>x1MA926Q zYvm=yBrTO!r*Iy>gaf58K%u9{jhK23am!BkpixhdK4R5iS_u!0!27Uh6bR{k`V#YP$K5aqC|`=g;YWFF8eEy7xm~|gQFT7V|H?7zX!y3`yS+= zhUlY{i}BsSqX%T)r~Y%YzBrouyyRx~rjFmzMKovIYy^tZuXqLp zVr;;XmL?-FYEtAj;f;%6r$|2B`~*ku^R6D(Fs^-ux?Igb3vUtJi)l2}6@@w6vK@T) zw^7RS-7t_V1H&Ye)X-Jc@^h{GJaM?6y)i%7XbZE|>XK`JmPEKba1K-uAOo zxn_NHHTwm~oz=V)%+Xc#%D9O|-93r{mu?YAF3igTX_A60IRB~cnfs+0T8pX?jb?G) zY+vbMoivM~3G*#rrI0y1wRZGN@EMn)NU3$aIBgn<-CKbfR2NoXHlvXr4;1WKws+#| zx0YFljeQrKZf=Knfbmt_7-eaNM^F$3i)M(ksmZ}xqPqk}*TM#xIfe{J*J+^S=;-J( zhGZ=4Tq)RbK8e<0lnFvsb1+`oW~)(^J4bwTH;BJ0l$Ky?U>zwJl;H1lVVBNdu=s6B z>QIIt>4;*~A@EV*&8vEKAfgPqRa^fgn$lE?wf3Q=253vqr@fA9PCx6G&msWF|sPX6I>rtJif97Z-JuQgUMKpsx zZIc|ZWSmV<)D@$u)|Usw3I5xZu-;C+qR8h1>iV#pub?4NZhs7Il-o2rdBl>=1u%zx3m^=pdm7Ee!rFr18G-%A8d{Dh+n! 
zJ8kY_?$!G5KmjA@6?h+6i5?6ELKlp>f}*fC}eeb0leBMNbri80Nuw zkUPmiKUYEMREpWNk?o#SUTAJDJOPvxb5iA?c(F~5<0DZ8H_u29(fxWIaQ^a!Y3t6gzKi~2(ejgl?A z+t5%H+`o(W(b$kqzjZolT6eU1H^qe5TGV_B{1M-pI5uZ7e}>=){gngK^_h*aCeo|c zw>YQ!JCeDlba}o*fy=L++gILRNLYqol@4~Q8#Ir>m07{ao$}`?s{Kf+ug}>6d&I%$u*k6cVt`H}1PU+$X>f;0rC<+Y z+fd5faa(u}fn~bY0t$D*H)->+1*i+6HywYp*yJDaZIrwLFcIgGrgkZ@biL;OLG^`_ zSL950G)~K3Ea(B{O@JU_6V25X7FAv-;D<39{D)njA@kqN3QrK4>TmEFOQ-E&L3{TF zE_Jx0Y>L8lh%mC{hPlw`Yy*@Gpv#&{!}n0xQFmYTKPMa#^J#4KOMK`pupRA2m>&z= zfUAUB4%{-&#s^)14W!{ z78W-Ei)43^5W$#?#H*rU90~h}!yh#IN#7IBxLJSCfv%@*J6;oo+Y#kQE~5u2_0+%o z>T$=Hts(Fe&I_!`CZ+A5Ug<#CO{}`3R5bq~Z|y+@?j6&SRA6%g@(v6Pfj>ul^y58Pv70N(pcqFTkCvQgtL9M5SQyK*3 zkXJ#65|!VF>rg;D4K@NX&sHkI^ddRbhrxw+J$ck6v$gAYk{(0-vCjTewbXmAk$uE( zXtk!}OqOQr7CG#il|J6lr}^1U9D%QCJk6&_jO;b&0xxjPvTzCb;1XtBX1#3 zyNVH&dFi=dh@x!Ku}A-xVgkR-J*rrJ_ODXzXseaI7tQpat8^Bs$;Ju~z!rbxqu>a% z{8Hg_blnWx&E9>oBWKgRbGrcmmx$5!2D#6qT;?Rk^0DRJOZ1x^XUXH{&-bQS9fXn5tIdix$Ynh z5XV_k*l`8-q9!z&H$BmNwi=WBeVDfSZCO3HTn0D)M%HU4yabmP$h~HXH`g;9NX~|^Ff})I-H!j zitTNXU+k4epl);^W1to0G_F0K`CjYovgX(=wU0Es=@9t5VvvB`)*YFNJ}rYd>iJd& z`EG=8xzIMAfvL0)>oWfnsmkW0re@3=oF+7!eRD%*>G-5+l3`7&h!~!9-`+Jtra4 zj6lSAWQp6Z9=ATr6G3D#ma6rUUbYzX>}WIzzk(;dPV{_5J<{Kj?6ADYAqsiP%ZUnRv0&2j|8!b zu}eAJuwP&ghtLgMH)1*6`hs9wTRFYO(; zA$7Nv_`CRlv9yNKN}sAz6`CBLS{r-Mu`B4?ujA?){$H|0Hz}s*bB#W@ zU6S_7ggNB3gmF|TpdwYe`37a!?)v09#N>&-GtPK91Vs^o;)7)r8^?JZZw=d)lDLhf zr^)wc$W6yIh_7(_%x_}3HlLsUp-Ttx?bbqk`&iCy{N4}YnkQAY>0k%^l%Y=uxBBj< zoS0b6!SUIaP4oIoV6-hh}cvr z4;J(0F2g)eOzTGQ%nTYkeC&4zh#mLH$!T-5(qJI+@Fz<{k%j==1l!+o$sGpd9>nxC zU3KOuxM?}{)H<)R7G~29O4U4_)H-^3YlY|zJz6$3xY>)5<)d2Xq|II>eNeE9tLNO( z7kGvH(W_ANmCQjN-^PoNFksk5j>@RhpcGD_S|`CJ@ZPnj9uvftMPP}8pwCfmb{0K! 
zd)rHs2ZCP<#;9{PBMQu~_WJb=f{WU4a17Pu*;SbgYgnwwJF_paY@F3OSQ{g$Z9(|jJ$e%N1*6M)uCJI1^WTaTTu`9%12cay61|yd2P6 zDHCQEKEw_UC}M3f=gLqj&QEAJAtzIPSSX?2woj$*7l#y!Q_;d^U&(Y1$Fm1>eCbra zcA<`FB`Fyw#^6G0->Z!pX^56b$H2FeAFAP%H|#~HBQ{X!{5nJ{b5Zh{o%cYp&`%Hx zcK=VD;%e9iX|h=Snd^9mdh*Br%| z8LD`CiA4M$o={HHB$HKsI7=+5d;B)x_WID&4X1p;Di+OLi0@>2^qqbu+y!*i>kpCH zOom`WR!7kq_Cs#5nXU8i=jd?o$W--HoN40^GhM%G=sZWT9ZF5E?EQ(%z@8A=gj?K| zMMk>!Qdo^J^}=5v>g~I+9aI?1_+$9fWb|e>5Y^bJ3|8NXDO)qf9D+Ok*QmO*m1YIg z;V4^kCC&Sjm$1ImXo~kIPD|uamt3Lp&l8fzPk8hnWq$p6aE{bAX&cNmsV&K7l7_hH z5%w=JC#L?Vg+(IqY-LGENepH|_3Ev44s}6enqnZYi}H9znM{r*v`*~s<6h*?^<3(t zT44-l869&^Z-ZRh57S3)EM9$21N4>N>8j%5!rkLTxBs4l<%u2UxktL(Fz@i+c21%4 z+F&eH?~C9pl@JvZxS}wB5G$^zBSr3EI>kbAaz0OGX+%Ycrec6&q9g8)cBN7zC`rCc zmX7)2mjVAXxG1H~UJMC^;H%o^f`xLcpQXGL1WhgU0O@?ejhy6mw9tTaUx5WJkSm{pZ0)LK!lzQYKy_`5KqQu`iJqshw42|}~ETx_QbfBvGnSd%$Nt3UYM zUe{p+5soPR`5l@p&cZ!WkKl)Ou$jSqem(HRKp$hxdGIIf>CFZ1Q0w3?$S(uJ1rGf( zt@p{k2+7cS&OR9jB+lq|USl*M^|Ibh1{wOnve>6*OvriGyv&16M#Vb4_m$s_>=mD; z=qXl^^VSLb#yw=-7W#uq^y+@<=CZRLh4-PUS1L&h_`C zys*J$sqtK+;Gm_CDZrF@B{rUJnwgqg6w8Z+ByXXkh7DEulq`F0G(CyI#I4az164+e zbwtJ?thyENH!Ns08x@`iKecm{HrJE)kt-c&^zud zWx}a5)WzEhpIOt`x@3wgWVNn+MkL5*q*aZ7Z06SU$y3UNEcfjCU`$rwUpn$Q{MHxo zU|&>?I43Cv_x9afxOqWzf~Go^Tdj#>8e~FdbK}g*KqZ(L)#n-&sp+LIvrU_-u}?PW zE6V|qh^BYe#>{o>r9@A|caw65@{e0}ZX+@L(uTjk(R6eGm%D?e|r zr%~^@+ms6-?3xCKQ$N1(dF@bdeIw^n$+K)l7xd)O=$YU>3IU)K?tS@7PSD|iwlT4$ zj&J$Ne-`xbAO#_j@;Krn1Uo#d;U902$)jXU0srOkKlms0Kdi~m8eDuA9nht$xd(k@ zoo<07JTugo>^*|mezq(H5N5(l_o>2A0gL8u6NkqU`G+d+R9CD+=c7%aV~2E*ayHxC zvEgRn_K-Vu&S6uDDG2D+&0RPcavB`qts!7Jz%N-QNjAWc!&wR(*TOY}cSCa}qd+y< zqT)xm@t87d>Y2TLvkPuDRLVXrh86yncw_|04Z;#UQRTsBlC9sSJLBkB2 z&dB{OW$cI4TFjh5ceHIE*8|e=CO#eTPS__9cNivM|b34*wdPY%yc` zbHWl>OAuGtg=dc2X70U09@N1H6;Vu}n;7&ZwvtlrO&IGkmFzjuh)k){4*Yc1=W=RxS_)f ztAmk0EzAIJS^CwaA4-%YB#a;Cj85zTQYyXZ5z`2{Z(>(j>e}0~i37Jt@*a~vbi%nuk5sAnDqL4KuLM5lpuQIxZx_-mcxvQXz1>!?eZ3ygtzvo+4smFWQW2lZwM7n0$iEvOXT3LCCy3 z5j&5(i4&~&X_i*G|IfMnum<8E;*gJA1Mx$H$Ub({t_G`qwF(Y{6~`vk+A=l-AEyB5 
z`XzQ?zIolbUB+=1)Ph{$0v~rU{ke~+ZbL;yYWDGK zuA)Wv#qAbSH?5bRGi7AjV(Tuw%x$68$w`IPxm1~+%xm>4iD-B6|4!+r=bwl6Apm%Y zFhF{W1O|Y2uJ68;TuHTb6Nb{f3dMrBf?o<|@iZLXVGv&nOmiQDD#TuYOou;q)6B`r zfTT)3J7UJWa{jD^frrM1J$>dL2NfYZ_REk&Y(?bGD)_%@$8e3PWnzA)*k`o%{7i9r z$~YCw3!qL4)E9JRSK^3p?1PxLITl$UJ;paGW+VdxtP|UG>CWK4mJh!H+Nn15B$j7) zb=|p)eHBCTCcPS+da#psf=Zu})WxLpcxSW|Shu=BmN>%P_+222Bty2$coR#Cf*!3} zIqBZnug1Wp9_=~QXRTT_Ne0@&LgQ{pNkY;&*+HkOGXkr0aR}m@L%D2=Uy0rgjXm#Q z+$&u`GM4pi`R$7MK2V!^@vHy-`gU#3_~%7%Y?DTF?k7*>jvUnCyyd}5j?5QEZQ<1* z1CMS=HbT#@UDUh5u<+m0*V=2muh+JH`yJ8QX_XVe2b6-lE|#;d3xYEJ`#`0J zeFd)Hb?r#s4ZKNer$1Pmx_3)T6OwXH)58-r6rOdTY&&CDn5g-?6NXIO)$%Q%WCo8f z9Y_-*uDMKyb4FSPRo}qiAxOB-hp6{hH9WJ=-?BZlY8tf$h6GqwDV znDL9K;}o)>6X&reDGYY-ci^Zvi*ng!Qd%!xsh{y0D639_q4%L7Et~OL+r&cd0#yv^ z1H2(+^$7`m8w9wnAR5NDjId)Fv8Rz$Xz&O6HJazPxpF?cWB2RI~;b|ae<4vY6I=lUs zyP|P`OgY31{-h9y*0`OuN2<@TvtU>2zTTkjn*qbnsWsGn44++D zE`Jy|;G+4-e{+GtCA_z5&T%Ch=1O|p;j(XMzqleM-&C@UtO6#4p9~#3nS2gfiCWCO z4J&Z?_^&h_TRQB4cyeFyu7I@G2U;dgSh9Q>N5Z$$m&&{PvKk(hq?*_X@UhCOK2W&L zB!!-PNtWqzqn!A%gAj%B$x9wE8G#BM=I^g-K}JNZ&grRY*@ecONiqZyYed?=v=5k` z_~J1sK?taM{40vQxsbpBxM?{y8$QDV4gp=wiSWIk6j@H;{hh{j+M_V`O~uduHX|SV zWb*mTbChbL{pd+y-ZF3qPh^k&=ENgNA&t?|x>YB&_;;ck6F)AKuOqx?P!mdu{V7pP zWGF7&+w_W{1h%+l}%^K45&0Eg@dn zA}c(V0MnZ~15p3zhK5Z7&l1wdp&>Z%MZ~TT;~tAqz+qUP&Y+7Zx@YcD8-;VKn^r7T zV4toOfcR=IJVc34%(`P-^&{e4KYzGF%zE{ulkWqURJ;jC_Sc%E$yMQSV9HhX)h!AS z>}$b1lj}qW^-^PI#s2Hs%U^UL?;c3$Yo2o8yFGXp^bbR|N8}^xwLiGR=##!;#HDk@ zd0psPr{O1pe;B4(*kUzXKS`Kq47vsN-W>WY5gZl_YtW1XMt{$$@Tx3*fg`=e)@uHq zHRRo$6@Zp7CK$^L7Og==_A<-F3BXaOgm!?htDsnQx-!PZH0^>(1Y?!7EBJ$m zM4_^+1#=Hu+Mdtt`UwWNj2$=6DGF-dlI!(+79~A%=kv zpP}N!N1$U%w6Z6s66=HhAkHa6dO1SZ+z|u%wHtDz0sIqGq*uHgn5U_>SC#JqW>(G~WyYY{j7%!pn+P%kg z%$WFYc8iS#o#IkJ-wECDW8dz);O8(V{uTICV5ujn0-JJW%68+QEWC-V!IaeThD5-E zZ!WDn>V0YeT&f(r0<3aDmpJxEi4*{(g<*bc1qB^hH?hJq`9#7W3rCV<)&us@tdk$L zhX|7)8KVAjWo0|3KWSQ>Q&8Q!&~R{B)Vz_+C<5)GKhxbL`Lyon+*G`p5_S zD@dmDP_IeoBmu)|pp@P&*@YvotJ}i+oY@!*^u8rJ$GXq8*$S}?hPRH``7y-x0~L;i 
zR(d+|2bWX0cSR(`3$d$CE{a0GN8uyej%5KsqF!T*b-iRXG<#XJ_$8h#a7+vkKzx?1J7VP0;S^L$_nchTUSLsx4u+gq0`gcA_*-bqyL!NsWT>ove!@PUy2%obS z7v4r?(wI$L#$R4+;R=4J7Y#M(6+yKCEMGJtY=fAL&!WQqK)qGNyatVqoFpz{qe6Nm zv5}In3>n_ZEo|7LkQw>mEToQzp&~lE?HrX0y{ThM#X(JF?GvUHhav}scsGsS1k#sF zEi8ibIFskfGQMEMH)aTh8&;3b9NjkOGfyPPO6WYsVA`Mj!}7&VwM#=JTA&t{ro`(a!s;9)SVhgKr|Oms7tS?La6qcF28c0 z^`grP`csU1&L?d5;Lk7@4zOv{EmeN1#PKASeXxhd@-SL@>0mviZ@d->; z0KLL#tJ!pya>h0%vTOou7FBLn0K8<36p19e3l83x7rSN>1OlCRG3Wt;zi(~i4ks_w zsC@uL4nTwKZ!+C}r7eeP!z~cMr9>T)#-4`DIf9z;g$ znZo|6{EFTpMXoMGl71}xY_SVkN8C%fPF-TM>5Em&kEIbz!L8STR-wRILfv|HTU7&)R1KcF@Ii*3%`wLQ;gXI&aMqylmb~bE<1vx(zCyUBHnukem=Rxk2Z(2l-|cNHG={hgaBVt>|k~Gz%YFdXOk+008`QEKQ%s#G+?LY!VMT=d{lKTowIg*J>kde%qg(j|E!@~!*qr=tS z(RF)mn?PXW!7WwNJce2q^)5Tin~U_V%v<_Z&vCOP@hSxDvS1bDhj4So-3#%fjjiEV zP^Qz7?v{28&bSmdE$YWhR&2@g%iesBHwgkejN00vKQ6oE?8BE>9BPMcg98M@z~Ks! z{0#M6Dwgjr|KiYv9-C#6*LG;*{xN)pAi^q5cTX;Rqxu}v{KLHwGLi8AHnO=s{ZiW{-L6QOt1902j1u0vM{4oOR>IO{-h%27vkKJ$#sy{UjcHMt zTJ-8%UM3vdWU*tgPVIXSGgW7Z5IR(hh-+!La*AAB4?%lbCk$;6Tx|?U&$v zc6A-j)Q9^HH?(jxt|CB}$FpSt&zO%Z3QK3(5T3}kzMMtuiZy%quVHMICVvbs;oP-& z6g@}7rOPX*Q>Rlnbm*%GA6^c~0`~;fYm8Pmq(UlAX*JV@pJ}_R2!ZJQyn&jR2f4vv zPIClrCLxrfYx8rl2id%;ztVVWr!{LyM2doYGRxllZhQubOxe~M7-joN`7u&BQ47TC zmJ&0E(g>T!Bge#)-3YL^nxyUy9j(q&XwlX&w>~e^7oDK>2yuYr7{v?b;jVIde_5dS zAvAB6G$16c^X}nha6MrJt7idazByu7ZQ76S(%AF4V*Z3CjDb~1j4=9;?bUnmxFOX> zdA18YvXW`ueHMhJ6eA0_ggh3Wd6nv1VN<7fG39Inv}ox$66xAN&Fu-47j2&Z+z*vA z%wIL`p}c}A1H`&(S#T-t*y0S8GFFtOlv>P2mkOquzrzgKu?r^4Ov18c2IJQuHVkan zsl$Tx;@z383KbvEQbt(&CB}Lx=}4Hs3IZV3gL(8Q5bNhpDej6b(Sb2zs7>2BSjqS- zb*)2!9k+pfTuzPTtn@m>idSFQPjFRh_F@fp&#8w$U*!DGHDp*&z>EHOH}g( z3_@bSoxhpz9E7m*6Y(`Hy=gV*3=UZ3yz^o#5~bCTYX?`b&!Y$gd%;7+2LZ`w0?KDp za^62~M^q#Sllk$?6L|#0RTU#tT_PmwgK+$&uD8!Vvtyd3>`~~v+9*;>x}D_rYfe>K zz1nNolmb`S^Ti82=rG1p>^hxnf96n2ml9W?Xp$= zL&3!LLGHfaZWSza;2*WGWRf z(D5RpKYs$RkT*evxISbEi%z;+0tH$4TntLbPq*@<>F-uU-AvtQLFQVLsVmBYgeaMf24M^Biz;Kd#CZUoCwk4D9KCa$SGY$p1Id%v}uyU(njJlb& 
zCnz%ZiXzr$tK7Mg^Uf*a6-r#cb$SC?b~{2-T-+-U!@Z3Rf3jYL*$XFjMyC|gVX|cI zrOLuTb}3qTpHMn7pE>)P-jDs|BpSTS-N?)0mB4 z#{W>o4%jA?ORBy2E9E0ThiH(bQ)`V31hG;E9 z1GR(79s0=)2rkWs0lJ!WhF7E|eFBg z+ZO7KDH}gvtP;YP51OVW?_e^6ZHHf1vNU(7bq=tJFj`fyx3G`nqmoAu-;d(Q#qdJ( zk%&?(%P)$RVc#;SaqIT=C5VyqQe1Tc{qML~Uud33FXOLNI9axFwK2l-P~UHbQQ6KZW62QgpA798aPJIOEOxT$x@ z>^}E&`^Z~p8d2t)$>HjfS$N|iSY+WsuviIfeioW}GcqC~iizY||9}+MgAsK_YQ<~H z79kLOr7vL^%m_FeMT4Mq@)!i@}=E&!Fwd`z20bmvh>o zGyPqu#iL|#`A}z%e&#CfGmSwLKt1Cp#JrCmUhUKDq#3wZ7jG1lX+J$fz0Ra`H13k< zCk~I`6fbar;~*+!1L;S>$#PAhUKhb$+tgGE4NHO$g~w17kbMjEc8LBGY?OqHW8tlJ zlk7r>NQ8)E2Y(;9Za%J|vLsJ;gsg<^1c=QJMEflZ?xjTxQT58F%_8s26BHxXG}(%F zLt>qkzjwE=DA?zy{lGQF*ag5>o(VET5mNWn3N(dL0+EZh2& zO~$6w5*-l(fy%(+91l7^I?TiLX-E9)COgqYK6#6TpQ{4`mArtfaK^IEM^F}^5J|`& zk!bs_3hKSp!i)@7zHTd7y{=%YcJtH((opCXVgm0d(V7wFbK z-`!gIV^Q^$ikasS!9mRT(j*gg0G}J~I=2m#nN||eBFV_#@#R{Lm_x}We)1pQz{hty zhSAplhp@Mdit`EH1+hVcyUXD265L$^!QCaeySuwfAQ0SxySqbhcXx-dll=bo+_QIg z&wiQH)6-quT~A5BJ=JeblQ1&ZTN7{Ztl%tj>MiNH#h-7*RWLo1iofg4Uxu$*guhEp zWY^my)514u+KkxLJ+{l-W2@di;$mk#>u+t~I|%cCjY?(|#CM33Sfh&7pC+lq78#+~ z8QD&GV5G71Ly(E}dreIA7nMouXzuMCzk#7R?A&uY&Z7iomy=nXmv{|c`TfATq5x*u z_ef3i>);_saBc@C$Tu69pflnu2O~)ITE9gm$Ob1t=6qQul;SoB9>f5`IABaZb<4wI|psbAX8E{<8^yl5X2}% z>~uasCt}slc2hkheb1S$$0=dAq%vW!3OTE}38vH8lDRB^hfZCINMTa)+JeaXl{88% zlTIAP7T35H7#yxb!-jQ;3RCnE_D}h1YNlNCcCV>IrS0+cP#R!$D*}U(%QkKKfA%DI z5@U*c!7K>{@-GP~mrHhRka5NrmJr+_#P^KhKI&w0tP{qfuFPe`*#N)q@Ts<>Ax*(D zW2ZWF3uzn#gphbzr&v78x%#QA@CZ@zcU~Lq>tSZ6c%jBlGOM5Qu-tz7qp>P9Rp3OP z(p}G8$n7jS1_T{UWLODSXWM)0Wq2Y_qI&L^5iJM4lg2=1M~Zy&)$<5M1*C=3!k}@v zdLCQux#ApRh((BW$UI5ZH7!KM-Q%jfZQd7I@`;!)w{z+w^fwiXbfZ zfYc&Y9zxdi=O00H^F$NrSb~|hJMiJB3nIZCm0`WT`J9ZD6qgVz8MrJ2c@1r7%M!9B2+O*3EF1EGz4opimYww@7;4*mNGUy7ol(I*V)r<|GG7ZgksR z=NR|c(L5!3kvL+)26WZ&A1>7D6hQZ0=B=W z3vmlAORtd-bo<*UwvQ_j;mp@M0M_0t}T)7u``^m((}4zU7GwbC%bbe*h5{xAQOcr1d*6?Ct(6m3U#E`kI# zB60`5Z`t3NL0(xe!`}I8oW1?m%V1Tgv#)ByP$=;xFx;t4#i@(KzVw&RxeF@zEV?!TW`WBavY+}2E97359F^jKMYZKce|EqGe6azCN1I$6 
z&Ng19S#wGZHB3uWT=JwJSOgj%4{jrUwXcyC>uH5X1vuE{vmm?Ib@h~Xm@!=CSez)_L$45eAq_rPAb*?Q zMZan+3fDsqYJpO`_EEJ)-4ddsq`MTUDi_p<$&^D<2%*t1Q#xHbl5+W)3kPnHL$OI9 zx6yVgQLVigt4F-E!{cK&J~uCE!}cuU=xyO*LNJ|J8&UhZl%gw1mw{)qF|4cMP`pX9 zwB1>(CfP7`QWute@2JeO2udkyH@|Bsbuv~(vb4D79on;fY>9$LdSApG>gS)^IM&8oi7L3&frPmUg7@1Q4xjm&SSxtLVJ_m_D(G-*NeXy&iOttt zD^SfqCx8vkr+G0}9DnvHr4ja(#w4D?AR-8L_0YL`24We3NVvnUIe{=$1tdiCH9feu zMhN?Au{nXx-r`JwZ?5EvDDu;wEu$=ZPrrX)a{9TUlgsn4&h{DrDV%Z+JzFMRcDE=h z7}+G7eSV1Wabr08Sl%rr`Bi6|7G6Y8NX`uZ0zlf3F3Szz(zyxbgeR83w|g-7=^q$R zlSO2t)h$%UNF37A8XhR=sn7#HLY~Z(7{SrWO+C!rF<>^-p(vclug4xsMeV*t3;!TZ znQnd+NPPna;9B#-;4|Px?Uw@wc#)&aAy7*#h-36yg#QN`7sI=P1eTsL zV8ACtuy+yK+CTKKuFW%?0 zPPu~*6P!G~Y~b!&8>GyTO_VG`@FLMzZVp~{M~n0zO_PodisF;W5QF{WS@=*zyf(Yq ztGkZ;AVrhzwnb71ASiqx6O<}l*7RQ^7wKtwKaa{?`TC1?lCCA&2lJ zxd0ZmCxA-P6nwYs)5uV?%$N(S8s+cKXRhYYtaBhL1eZ!^y(r@^~Z+qb2 z6dNW`$nJByH=pJLXgMvF+|#{10*guRYP?f)#D&&o!)!#7VaLIhL+u{K<`a^B2{lp` z&}xu=$Dk%K2!lq0I{PAnVhSUIk7yqEojH~=KyoA6uL-rN(7TX#nh9$ba|V`ObKcQg zOefj`Z#Ja)SX`0augabZK*RJ)KAihDAUKj>)yRElU;4Wq@yq$}btB4*J05)ta&IzS z9u(TBt-yN( z8je8Jy#w?f=t;A{U<8O0l^E0|upb}m5ZZJ);@t|hP&UojVznlXMOV36H-Xm%(FC7) z9M@of_9Q|=w#}p2Vj>)iWTDIsR6-bETP|vH7o@F8!L%G3cM<=DR!|%S+UDNPE-6;t zS&JV0(ypyhVan$V!WZih`?htOusbfI%Qd8t4TVk!)@eKv5TY9hmoj}Aq=8Jd8p-qH zUdjp`H&PNR3Ew9Bn~tgyilJvelwpq$KiKHV`n{QdI^yrutJ8auH-@uegVc58S==OP z$ua{_+P<&6pcl{ML5+`wSBBH52z4`>2G2r36tM0fB=R>rV)au^=4^$tI`$a@kXO;I zx2#p-?S+N4(YR*kM6dGi&qZDLp?FhJ>%bLIQc%x>gnLAEH-5VHY{D2&`C$U+>mb4@6H zh`(X*T)CD?qm}ghgG!<;jwI}_l=x`5U{4dsqrm`e!KkC3D%x($t_k!eFsr4T|* z%a~v_x?c+J9YCf8T=G{U{@K`2xZ)iUtpOcHRkO=B~JCeQk`zO$D5$R;l8ZGI715F{-TwU(H0(Q z?NR}DG1@LiqzbcP;T_S!Emub6TzH+bK%I19m^+7kW3g1TeR^XmXp>_Orm%L*a;&Ya zhGI^H?&R&2s`mOBw9nx?fZLU~VLb%8BA*N;6s0c`61as$EP%~$;9u~#HcfGH5Vcyq ztlFwNR9fz_7c?Zt$$gtKJQEF`@ZWkYbAAnPKF%kd%)>iAj@)qqcC-mJ&33n-7diT) zQ)v7usLJ=l7uSRz0lO!ca2OJhf)XHw)VT0M3q!&5lQf3P9=Onxn1-BXH3o5_<37L1BAW zBFkHUb9lj8rzYB(#(^7Wu?XV5;OA<^wV`1%L0Yp&V->Xwz^6DL`Mu+%b;;nRkj>!r 
z6IJ!6_a4$5CpkX%YCJi?XF25{J(oFZ*yf!**Z`|Eeb+T*WJ~8@|C>WeSiGY6*})|w zDs9_6rX*#^KWGgs*E-uws0V-04EC6$*U0=LxUbS+CQEt9e#_OQ5kKA>*<7eVng^0@ z_i|}nDFFG=KPl>P43NKrbA}9uUaD|9TKDksGQx$%ne&b~?u^=SEj4ck%6Kee&JvyMg_FlCmXpp^uw>>INXpo&d1urr;rXy4V~c6H z9d5wZlk2C$Nzo@NhlYEYB$h15z9m2zk)5lECR~$m=a4AX8(SamRKdkc$TJx`tyf-c zZ(3XF1~dYhBf?gNni|>Ko)89Zx#*k3uEGqtU)Layd%;w|a#c7x-_uh8K$b1}D0LGaR000g^z^s7g z=f@4Yj`ULJwBE{m!|FKjB#_ug7+*nll*Q)Mx;ZoTPxe#7_CX_3I;7ol0K6R> z^>+%)=_Z%Y6j}2m9;4iG!)*INukVV);7=&4KTrzZ3waxeQL&+ZH8=tJdA(eA6d-n* zYV3uxQR9`ab35QxcrOSca+y=jhqKXZVD2I5R^h?{>=2Gct!C+XVzAs~{o~{{?MIp& zBqY0X#N;|C(}Cl7#~A|dJG2n4M4qJhogQ<;wZ}ocjELh*Wru#eR#q+nvATg6#Bsp@w|I1&eaNRz>DxNo*aFLBG}&`Bh&bAe=|0LYW4=8VFx*lX+kQGQ7sV(UCBs^WIU;0%s z`XTW z!(tP#M|kuVJxgtQ=X)Gb6Bv5n;qq2BfLzI zxnVuRc*wrn}wamQl5vhyXIl9P|JUA~_Xg}Jyszvpa(n@kA6SKt zelQHL$tmc+mzFM+)k?y*7}}CCWxfTZ$QJ){q%@~XGZoXKAs*RzR?U#g(!X=wDbNX78Bm3xEX2Mzlhg=oObMkjGKf z5${nj1=w%{ScZJF=5n=inO|%asA6?V_TjmgiYnN(27aXo=+vNm`<=#6su~;uJQ)ov z8;QzMGtHI|1t5GWpS}|yWvpUwk#sLq2W{$fd-!+}XoeJt-0O=|=P|p)c7i#iU=RF* z2}|sQlU<4pVE%47;h`$<|Ehi^{jW>kxhRG{MApgQ6Tw+j6jmrGb1DpHP9f#6OEi_= zK|v!jbhK7J{EmsR?oJ+dO)IUNIgWr&XR~V53^s9;5^dMq$F-Y@S8OTGn z0mg_B$3p~65paHKUQKT=FS|v9Tg_Q2pNHF~4nh;{;+@*&3DkAde1_*NNpW_2EKK2M z3vvCf_;u{{J%lEYE!w8#(l&$Kb(?|<2-_rQY{x8^s(y#8Gm#dEre%sYC9Q7RGA73? 
zf9-CY-|}P_1YdMFCN2JmVljK}D|5?>XlmdT{8U%bfh|s>13U}aCKQV>}7V19O5pnl?#CJ0#K_x?`r|P>DFO zZ=?NI_D5NOmGk8@H0O&sVo{{&ryk06!bnYWN-AeK5u%z={N7?Isa_sZmcCX+%-}V9 z4pMVL4VPYcU{Mz|u!n#-v95}dj9S{`(Nqow^j=tv@|x&UZL#d_@bS8CthX~5Ib5>< zZrG^ZM~^A0BS|&YD7cq^L6JPN_g3~)iBO+9SUiLoCTV)we!#mF7V2QIC{*%A7T~o6 zDaVJf#DG{83)ZhxIM@ehH45Q%H1(M!rWVK0wL90~wy+ZAtZ(nk`yH-4o7ncn3l=Hk z3guiPvpsX^aC6(PXwjVhG;h8aB!r8iF({wv!_t!x2^4_ zA3CQ$V`HHw9HwM&5<$^q9a0wZEJ8_Ggzn5A!a?gqxS$KR7G-UX~WaEv_yS@S2)J@PO1 zt`l}mt3o?}_WoY>r_5H#K- zyWR$r_r11xkpUTVw+YYPsu*YU+QZOF@lMvYbzWAN%J#B$KfcRj)2Isx4`Vh9kG3(W z4HpO+hct*Q)YzgLZiTPixY8A|TrEQq<8dQ+ldSQb&IB5e*xYteGiDq3h#9rfp>`Fu zkjlW}(X0;Fii6Bkf44a%rIBO0Vr6=v$b)}CvYUP2y!V5ZMze75`ut2V@Qkk|F;m=v zyWl%tUzJBcjP!m8&?K8zWEN;9&tx%tEbZ1Cv^{Ot9k(iVfL~stXPzGKP)s$54UTdTuPqpJRB~@oVfC_kg`><4ZpeL@iN3-> zepP_%cg*>oftKqu?ib1m>k_w+6zD373cfBIPkl@fT~9#7&>{19uM;fR`M2Ur79K%E z&t6)Q0;nyoD2v`l2PxO|d$Sb#aQrGtlWfsNY$`yKltCX*AebNpK zL%SUm>hPvmBgqKTB<=N9lVaBAfQ&@?WDqjb+G3l02D2Hs&xqW=59F;enu=o^N0LLO zSEGAS8jF?HyV5k)5KuJtqymgUTaJgb!GoRxB1dD(X`adwtT$snRj+ckF5Gm`DmBC$ zV)W~jGTuX~+=jqWNMQ7H{?@qLMEX(f$4cc+oS--=>588NUErIJ$9+gb2v$1=6aYO% z6aLuR%FLzDuxm6KeCs&r9Mzw$V#=Utz{#;0Y2TgwbS)ic3C+@^r5RC5RTr49^zC#1 zTOhUjawHn0Ug9T!7O@&YZ1@6^BH>r@Evg$&LBpD%;zxc1u8w$mr8X>9jP_02*0u(lb$=)M1WGZN2G%1OuX&_GP$eDf@F#z~ z0QID&OH`nANK?B$n>m-okz>@Ea*gbF<88I9d49ir^MlJsmJeE+rXggT=CGHW~<3?M=8S7vn~QDs?cNEkX1WU+pO==+nb@KX8U zAYkUGj|xO0o%a)_Aur=r@tF4!ACnF#Ay@>JKf6el{ajS!H49N+0_I!lzm10D*_b(Z zouikWT6w2`HbcYLA zaO~J=2o(I|79ihjIRWX#OP{+OcOHggECYnYl*2i!9dei4Yd@x<#kT&}8(b-=We7!( z9H$tZJ`VtsxpdOzE6L~yW>hX}LWtb*W!3=*xQBG;)6ycdPU=#}{On{TI;2N5$=EuX z&c`&#cmN*j=BD92(7hCrF!V)ON1-4g(P91xgfCPvoTG0jGs zjxYl9N5%0w9OA@m_KKl8a7v(CAw8x$z1>r5@=GWxdfpy2!K)8iAb2dD7vVV`4l`#_ zD_`V<1Q$e*Z5tG%<53{bFabqmpNFMNV0eRqe=&U&R=YsMap_Rze_jP0+MT)f&)JIi zPX3@xH>V<%7G-JTKqE4eCM?W%l!|ALpzYMk*Xa=hU8@ioe%A$3`|h8_F64JQ$EiYh zGIK;t+lz^K9#{7a_MA>uCZf~TfjD7(f{Nz_MGx9zK$rjkn$kam*OraJ=Cz@ zMnHBcB#JY?z7g)LD3JQR8TYZciY(SAfElua>`h7@+KFPnhY&|wAF(qQ4hr^B*0)#` 
zhk>(9djAQIC@j)V3P;0FwjGH+qdD>satl+ywm(G`8Kcb2BCMNXD>M$N4sA(Ac|u89 ztz_H-A8H^ZR};|>MSyy1p+t2M-g+Yf-#>wTYtzg%@frD5ssmjnj4!LM>@bF>JdswN%>?4*Q zw+04j1_7S|$8B@~UCvd!Yya8;1R6iPpi)Ydxa{(aUHDW)&y@U;x7OJ-LB>DzwQT(I zLCINlnI9VC!bQJY;)jlJw~DnWve7r#x|m=y-xU#a`bunQtlMOUe{Z)uczWM(wiwP! z^KIS)_zvpGq}n_9d0~R{WV3(p-os)^qZngZ;yr7rSzrPd|MV#SKMFvJC5=~)e?v*U zJk#CX-eoP}93NIsPtDmG85ibti3lo!MoRGg1OLtkB0K5(=c!5iuCi;$-%0zAX(a8( z)W_?P;K!ripO;s^5B2A3MZZ6fX!DphNTBwqHhh!58+Jdu?}+_m+GRk5>htt#z~A3y zE)L`VE4GvWN|PUTc(_I2{GKPc^8WTe#k63&eeTz10RHq8@%i=te=TUz$VZ3AP5tZ5 zB-`m}&S}Q))Q`7spw%25-X{I8k^|@F0mU;{Ez9)OA=TyKepmjoB_zlM$FTxp_U8Az;?3YT;QJf&KW2jvL45IeVOULdls&#EPHes5|%aUS1 zsHwJyKUwM8s3V)>pWl195MGBkcDr}3bIG26Vf@tbuX~o{%;2zlr!Hc~X<3H=)8mUS zBh2_}69NQ?>g~h%=O&%l2X{T)L)ZKXL%vPhvtGw&&|LOqTL{jt+gsz0MR*KA)d%^G zU%4*sR+p+YrT3+}_I>r%OC5WlK`rlbG-4C3`OxY(%wOb3Sy=5hC#w!yt>O;%>2DSU znh6aiIal7>vq?LwAzz+daWGdwvSKGm<-`N3CBSIYK?3nQf{lJiN^o-$Y2WeJs|OE> zV++CbR8tb=*VvPBNk8JX-bWRlS-Td$ucYt)_`K5qn*6(W)Z@gy#nfuK{Tjl)|Ft*5 zVgKK`d-a=OmU9+w7V=9RoTDIfHSKO6psl#3w4oq3QTD%{Q~Wyr3owm@!iiO3o+#g1 zwl6w9@YXdv3?y&M5}3$~Q@Y_{QEhPLwO_2G&-#S_r^P$L&i27y3xhjFO2mjn_UrUt zcO9sxCY$IxCs6m*YkMa+G?EY9kJW8qR0+UcW>eE6z2IgzbGtzBCT2%6p{#x#)Jxbm&> zp&>W<7M_Fb4@LC3Bagwx-=2~?+rQ&}5uAH?frk6b&2#r=4^a1asj->droW?slo`}e zTjFm+N1huyO9CAki`4_g@=v(?RNhRac%$C zG|qxhvb7xQV*v5WDhmX2CQ8#kMEdS;|L>HjL9!S2zsZ9{353&sBYn>UNzYDQ^(jm5 zsU)kp1&G`~(2Rck*P`47aBcrhWV{@0&~+Jld=(@Cvj0#&I0wN{6kMy@2>$+$8i*|* zA5o0ca^?m??0?OcI|H;x8>dhZxPM_-#Js!308;IP`~l5_4U;6Bf*CG|iGqp$8^C!9 zkZBNEt!jk%%dw_4BG7yg`up}lj&iu2A4JUNFV=s-X$|zvHQFl#*$$6LpVfcD@E05Z zYrcVDX&#B+{|)hv8c4uEUnLmRj6s4E;NJYUYPGBXas9aI_iE-4J zNF|WrUu*6Kr%UJ_2f+$V)tiO%*yN4+2qz!g0|nY7&*OSA@0(!$xkGv0Fq!P@1N?>>sv_?-a4m4JIay1iBYJ;*u1}khu2Sn;Zz1 z44C`99tr2F{CT;(qA7EnCt!$;m>GZiFLM!h<|6$b+-Ld+)Nd_WBBC#`as$D*~f>c{mOuGZa7z1YdJ;fAfPoA zVLey%;k&r_Tmu(f^+S0(gnMXl=us!{TUC{x`M6i1$WNZU*N&*F%P&NGhNHVJAGbp< zb48cZ;_gx!lgDd*KWDr2yx;7_&1my+?+paf*jOLYiy}3PfsiMDs-(9y0IoYZtju?)no-LQ@o2~KEw)JR}~V$rY{|R{}qEs?1@Jz-mc5K 
z&5y?U*!FbI8&!^!kplK}rJ2K5Mh6a-_@#J<)W4L|mWc>L3ZhN#dE;KkpvMpT>V%*J|B(wTW-2ax|p!y zcGv+@8`Ah$n0vDm(U-6Pj;A)yQ|!aXDA8As)gv2i`K%_1+cGtP2ZlEsF4Iz+i-4B^ zX2nKAzaIIuo{CK-wgYZ&o2$DIF0bfKe$lG8Z*<}(3m_va!0t;jG=`#XGHc_Psc7GX zbf}DacAY+wvY?@A0Yh;JL0J|;9eUWwYq2WdBHCs0LVu_S`w)%PeB)!xmf)bsz9VdB zCeG2CvCe~hp1F=(fq4mV>91@XgbTYQnXE_Gg)|4B`BsFZLzPjdojyW>LGeaL_Wu2a zcu<&sBp}>ev9RVK=^0&~WXfa)K4S`1ssUnCC4U{qg$@ew!-Xh{RwiA~%IGpoYNiOJ z%9*JPVS7qwg2&9NOxzzm++E0(b_iL91*_UJ!j*qQN1Lttd>-qhP+1&Ph{T^0dBV=l zPMKzx-@uu`Z)dRP3P0FU+#*O6y>)^TZ*(DP@coL7?z<~_oKIX0&IMKg+Cg{Mpe8)& zLga8iIf498vv4XcEnljclfGS774+bYXuNhca_E)qhSchtByl(K(cA9Iu;K`M0$;m(GfVs|r zoOQg);Sr@eCq}3?*m`|IfnS}arUw(^StFZ$)XHQ5i`8Jp5|(MU)@XRXyk8=F9*qq_ zhrSE+DEI2`k!x?f?Q4H8TR<#mRo`Lh{s?&>Zua&teWXcA+bMZ!@gPJ zv_IKF{(n_ybpt1GR%kFVe`YW+#Qzl9$=So&)akD(&ui=2m2={Bf>hbRc}W+(kQ7lj z@dqW%T*725WkZr{Ext&0e`XJEJS~sp>!p_s2{(fhL#OigisL(HL zjDhq}^z~L27sL<&-EDie89zT)FQ9G|I+Kn^aHvHA@s|p}%s)?k+zPQ!{5GgE?EbL3 z5uNgh>m7#uf3N_0-lA^Q!F1p70D2H%ptS35s5fdXUeqKKo9tl{TiZ&Cs27m>(BxMI zT@P)!ia|MPTNISsJdX6b2yd@bS@9AZiC_ys99Q1w28cg8UBhpLpO(3Hp_bw-rJ->ng4nxr!v)wWO>4VlU-Q0=)97%rec!LUZ0YrV@ls66 z^UHS@jepn+aqtuwHDz03W%+qCq6C2~*m;s%M z^CPD(HQk93bBFJ?Kfa!Yv{=)yKao2-J=PFHUJ0Ys{<~>cUL3E~HtG0%>5tpjjTSF- z56N=7F+miSK=tSWvq`Ghaq)HS2UF&;GK5ee+@Gb%OT2ew5>C2{>|+wVx=NojcfUrb z>ML=t^m?47S=BXv*We7JT3B>-(iC#)QnKVacr9`rV@e(!%mEk~O8>x|Z}W(mhY^%h zlplhsM8E2UwQD@H6LA~IwoDX~=O(PKY0A%+)bL3bocS~I2?F*xJf2ET{-ai)d=2Sa zzEx0bwkm3czp)@GeSIEH(49XE`i!zQ_=7FjEa%Vy(&-$Ss-kvEg@WR@4O_`r#_$Bp z2++%+LNuBHSly8;Wm_q=P>5`)$1{*USbx@mY<*kH6;}-a&u|MGj$5+G#o9eaUpf!;2 z7mra3TZL7Zr(ZUUG+b{3rbnJ4`+W%ySu=h5^d;)CU8sd|QP4+&6r$}6sqKwZv@z6D zFqqrrPDXHGN$5vVV zsgwekYf7@}Ehtu~I*FXZNk#{qJjPVlsd(U&WNQk(VI~DLe#M&R&`@uj`8~_(;T!M8 zL6WvUd1}RjNh@Ld#;TK9@60S?(fDH``4ZQ45%1F1-xnsobf=Kj|MLFXW%a(m)J%wUJ$#cgGJ@;b{?^R z_du4L!1_8%b@or!eDZ7X38s2^99qt)cXxj6NZ`T0hi)3a;CZl~sfW2;HroDdwwe6_ z{1xS9fpxfkKCNbecwr2|u>Xb9TH;SBH`h!^P_n>g1z?5Dtyqwq*m~@&rRnDa{-=&F 
zcI@p9UR5^t*nK@`Gp;rPZR>TVYwYxNHj3tZ=dVyO;akj2sIYt2sUxsnTv~!>-DU@`p@yi#H>dp=e<$K%l`Ps zY!;6d#p1q^%l+6gao6k0gqcDIX1Ox`A|0qgTXpKsvHHieppWXJHq2wyiT-3&)9NZ! z9P#3f)_VBM*nOqec9vB~3##^;lhb{3=Q{P5--|LfcByfYQ?f*x;R99&!}qog@B8$y zDuOvYZ3{b0ye`Y$SfPfjuM6t;JL>v2ehsS(qt!%qe)kK;FK)NX-)h%rZu^w2L!Or( zv=)q-mKV;9RL3TET%+8$x?a?*7tBaqC+EtvhQB!!WiRZwSm=e8E?+b%cswF8brs1% z`~k`~sP5}nPuTgkxmWg2zTI&}i7=$OU1lru;JY+vUfM3iXl+YgSyf~E;dgc720y>= z;~{(cK8M+LHTd4P3ATH_c->}QZfqFg%5b)At_t*jUGMnO;>mWycK5#ES#J@x-L4hs zDNARQad#mNO2hs15c2#^{H-GBd7C(St!#a;XSG`W6o1vp?_(#$BaI$H5UG3(5pBQa z3o2ICF!0z+#>^w}C4s)kV+#6IX;M~PR(I+?c`}bNtqs3not9GP<8vRU@|3BM6Pp*S z*D=-cnTX$A8))fQU+}getx*}w?OGA?DzQP~S88oC!{@e6k5zs%b7-R7l~Y+oRXL04 z%WZDjnO@_pP zUOO$TYJ6AE`ir+Qnd(n|PdZUB0&k&u4_)#ZoimjlPyC*>GUwSh2(`pRw^tzqT*cv6X*+yfYuyEbAmhCvqTkd=7nv zuTxNxBH2=*Lx$thosHn)g6~(qJw|A@e;H? z0(}#r*Wt7+I<)@g5C%?Xytph^wX}@qhwok#_GwPLtxIF@;!l39^9I5}sB(uEJ(~p? zgU}VDh~0pyX7EJ92~qG z^=3*K321D;(F4Z)i9SMpq#Zjv!pD}JHt2NovZuA`{+@ZuqZWh)Mp7)XR_|j9Z*e-X_Ld}*TcY9*DU0}R^!jmf3h9!x~-2OVw*73S_ zu2`7(Pjt6yc9bqYX2;~(<5#C*^p$O>~x4Vru6;=er6yNKCdAJ27FIk+2I;iXxl z2Bp*X;^m_;*hyEJ;~(=-w!u`-h)87g+Uk@l} zW@jEX0; z4*WUa0RjSFza?Z`nb+Y_f8@a4UC}7xp@F)35tIbUbuC+<_GYkBD^)FGY!0@ zl7UM$kRSy_Or3crBYyJ0Kd?7@th-HHWHvtOZRvRBl@O(reKV!n3M5f3kWloizFX)> zT8~;-Fj7tWVn<}cUvcblMCSFfQ*@;3*AKApo$_$**qEQ(_f+W(oPQBp5XuT z`@ko$;OTkqwh z{AU}0zoc9#bCPLwD-O0*3{4?bJb-gim*)$(-z<=`ir<48}+ zYtZ5=?Vc0h#yeuB-c7^S>0xD=uxpFrAc>~_-Gn*6RxaH`KGobZg$!+vLN9!{;?tU3 zXudX}C9-Uk37+RT-jN22In*TAE`hCNvAjiV!fwcs&a9nN;&bm;)dbE>WM|5ceDU3Y zPmZ;|LDKXaIPm2eRcdd|0k*#b&#c?98g9iZAELICd9h%v(yHB%S*s_HUOJ*RmGi$y zzJjym^k0T2jX%yx^+;n3dzO;8L@{I9q6~;r*O8Bo^?FB7h^>g*b$DyrZQ9PC6z1W4 zD~`W0wqYI52$_+p+xo#BNj#!7tEF~leuTz3=TddmFy`=h$<1BP)nuf)*|D1`cZ#9{ z>(#`Hh;4o#3BxmQeCRlQaaGO^kEYqGRMmoS>JU)VoYv-%I@V5KUh|2HP&=I<_fzt& zP`_w`fDuOr)cl5I{hm?WL20a+u~$Ypu@=We9O1C`&Tx-uMNT?f@Yyf=+65u96mc+Q zNTWdGmd}#xyoh8br$AM*bO^-M(spN2{hyl3Wz-4i6vcWZ#_smlQR|K!{4@eFiFnji zTlG#?RJ?|1ft1f%+WZb@^=mqdyzy~5pj!)7=|V3eA+v6MB!PS;!g~j%&^cuBhEXVl 
z(u0BDLPDO)T11PZE$!rKvQW>rH~?#f_-OwFxt zQw}8yMAz@m6Z9CDz00uu9yyEp@m~CG$YLP06OHV&Ln?1Vh4_TrcHqwQ4g%_G)CK`U zdyrnT}6@!3~|!$l)}-UR45_+JGLySo|BE&7o&>46S(jXl^QI5 zmdu)njaM+i4G7Y`z==)~$D$XW-lFnsJ}r2~Mmj(u-Q_&=dz{SN4O1ek;)BV4f%hAY z$=F2Zpfqaru~tSHPHa3qn69&&0QeJD;=MQd%sj|eURvr@KTy6Szv?pWCZr!-HKbF+ zkk*GtnoYLzQ`0gr^mdCsVf^Hj|4yZf)kaEhqY97uw9S4U8m6eUjR&2oM2Ep7%8L7> zDn!7ggsah`Ld$mbt4)x(?ODp%g={o+dxv$m2Trs@7_#w*;Y7x6L#w*2t?&Lhe*?Oz4Bn zS53MC94W4ZVlq?U;vEF{E)EZ~O0`Z#oHAS32I_N`AGBq47))|awFYRBZkHxT(P}_` z2O%HhyF;0XPT`%ubY`;Oo0_$MEm)u-9u)b`kn-l@=^O503YhmQJo@DF`1~@iyqLh9 zrw+G!;jL6bl?@%a5|RG^yGQrS&KOgd37%nB?B0(ZwXd}7N7%0Wvaz^UD7kk?eC)lG z4SINQadwessWJSJ2;k+Du=)F3p>4 z>!=oXE*WC^;v80%HD?g-<0C0(u;yR66={TT5%!gMz&7nPtDq6~NfHyqwbMiDnVFbK z{3^NcQ!_f;CdG}(9+pQiYcf4!5sJq)Op?J z$%kYBb9%*Qm+mFlq5Vly`8h|gWUJ8m$hq%t!pyt2kC@{{81Gs!6vI9`i>xq|y6YOf zSM3QvkOG_FC|a4{1U#3xA^i=FDUyS50cWx>MzZg*It2pwl1`7hE^|I2UcyDyZ#rma zvmT&tI}*OooZ^&Ev-(xZhHcfsKJ@UFp*5R@+40)ejBzD>4pqsXC7HyF)TWr9)U?G! zw&cENi}&f4B<5VvYltnR{JOl{ZzI>n{|h}p!oP1$V-z|%yp$z$#B+XefpdxSV<_%kA6bWBKPc78$HG)7=wbiJnV?Z1`m5(U4b4y)yiS`C% zRw`Mf{s_4U@mmgsHB=H$^UdrgfCModS-|o#0vg~TgS5#+6(RMqNhm#fwVseo z#^*)+Y#jVVV@~JbG#nKdky;LIT35@YLG%vDM#2pn$en%+dT$Op^ ziddjr7@-$81BM+S(b5-H?iZPuCU2;SXtC>ZafK>2;-;Wt%{?mmWNJC!wZ#5If?RA> zZ6k&I--$wKZ1PD!-VSHl_Sz#z(Eam_N{squzFnJv%w`^1p@RQg?iNr}<<4oI!cgk^ zW5g~y___0_M%@}t+pwd0a2E)M-~FF03RqxuZ#ox^$VzUY3n z^$)ca==@J71rGm6>dJ0`GZOLedHE1 zdJviV@j-!Dq>X0(gnThe{fbSS`CwUP6>4_@NzOAuD67yFVUF6#+y^i|jgl;ybp9AC zXe;N}Z?6`BD3eO8QOQnW%?-fwD+HKMIevb?jq-!mx1FIGFV{7p40I_Z019}i2I#v* zjF^hs4sS&s+lcyXi^R$=`u9T@*&cC3tNwHM>o^QFQ@PaDczq>n_>BmDj#ar$9hA9R zP359vz1@5vrVFvOT;=gNsO?S7`=+mqd1n!a@GV1RoJ18PR+CDE?D0<#0}+(QQWkM# z-eav`YpDw60<&KEta;Gr z_X?zGDlU9Cl@v7c!8KLLpak=ckDw78SEQmQ4S2bh_58nFo|4v_+i~P*?HK+K%_jnt zjil}sO|Yh5+xtIX;3Ih7ur-?QCr_T{hOsJ^fAr7MZhLnei+O6E6M$w~+0HPBwV4B|fOE9Vu;*+`Y> zx!8*jrIwna15+_I+*WJ+KbvC0YuXi4BU3Q8ou=7~l=F*wiIlx0g6X&BvWXflXC}>f zTP}P27T_XK^$zx=A{9g%lcE+m6Nwv#g-ntxFETYL;>0vuBYW6I=bFx33)&E3u4^jZ 
zzbfZr$(~K0;WdxSo<*I#m+U#y6k5(ey$7KhQE2=YQF;}4Un5Yy7Kn2D5l}ZlS>jln zR8fk0)i|F7TW3Obt0SoMCg^<9ep+}Ih=$x&>)wQBLCUPd*@J8Lgv>mAJIGRhDB)_} z`Em2krhWsTAo(`*ngCNU2#E?Mu5s3mBmMl$ykq8h(%;K~vM@R<^etXlRPD@~e5tCx&1HOBT zlykoOUVMg8fcHt_9+b@9^yv35MMECrcAY&(zXl>1@AKa`z}Q83J71KmYCvrnfKKO_ z1Zuh$8%f8#?Ca>d7$anog<+bXH7{y@iu0}js9d;7=p{tj0Ya+6UTh;49o}9VI40M>8~THlupKM{J@QrRVAYZwo#~(h_?rWKCO@==!^SF*b!Ek zzK~&n79nFkROCY+Wqv^@EdwkINr=0Fw^2Cl^G^qH`MgXuA3-@h42bJP ziIy(KdwhUQnHI94>0e6;;0%(UMj{T}H%FD!lf#4WB{k;gj$6%Rz%QSPknv_*>U`$Q zl?gh#WPlB-#D@~2d_`WQo*U!jnV^I^758z>^h=)d#(;9ro4?O`_28wWpMa2EElo;O zdh_E#rYG=x`LfP?Pbz3A7O7x@1gaM_tMFqts8AsVsVIRuAYz6rRAs3|m8nS}`PQ^; zQJsi<$muLW1BV}+^x}~y!=LW{zSVu8{`~m6=J9qPz5P-rqpC%LoQ1POkE#uNN;Ng< zdyvXBQE8t?xvgtaP$Y>M0#0KJsA~`Ul4v>s4S+ffx*s={TXNqqVP>=oQ*$b0YX61f z3Z`8e?u|z*jCmLu_s03v`}OIT$Gqw2!i-#O%({71nR-;*fmRMV0v71;12U`tvRi`U zPo!}!Mb4U`S&O5)g(8&x$PAfSD*rQ-T_%uU?DE zc7?gHDijq8j8iuzq+FCrh3J$gV5R&R1w93EgqrtEiiaL(Tu)i6AGA34!&t4pe!zMR zDQu0zB*hSr2sA?OC{R8}2@QVaxE?xsDF)Jxhie{Q=nICIAOJE1jtm|3;}GbJD63or zmf74ZP!JJK3ZA2!>W1tE(CTPXa#B759;N(X#oOO)Q0{)a-T-5P^UL}Mq`s{+v!?X+ zmphLgnB?|)THCHr;Upw(V7^WX&+C7g0~w4o!VP>C-FcUti&!wca4sc7z8zw-u;cS;h zSr#J4GewQP=nl^s?pwf_;cYXSeF}Lmf)6I2XCN?Mf~W49P@5>%7PsgON_{a&+CZfO z5E`4mPlkY%TX;njfoCn0=_Eni+wM8+JtBUMy>FO= zem1R|NB%q?X?)U(B&tFcX09xA=#E5sU#}3iP^AGX%Q5|=m_Je0uFz}h`=*DSYJDk_Sw3?Ut z;M4O{Ni~2F$ta+*gaGEO0KipcD2xu`dpP9#H@{cq!NcOSr@7!qrtGw>dSLPE;g`>p zzbJ&1RAo##eiRU+9?8vekcxrnCv_GqGC_3#X6lGaa;~r>Mcd~{Pxa2ajH>$Z#ep}Cqq523bhyV+^x5u1v6rYA;suY`SEYOH;0 z5TBx7xTMyb!zDqqthh^a zJm!*_-%o-%ojXy?c%GaIX7DOMLeH$0EG8<*W6qHKEz(k^1?ZiEbW%jzlwj=Sv++;> zKANp+erOcvPg@BsBz(Jc7;H5;c6i%e}wvV-GI$X zgxxOFVA#ywVqI9kXa6cZ48X58Cy?EvC)oT{p~ZEY0NIJQm~i+)Vi3;JmCZ*~Jy!u0 zo{xk`t$jRymdI;X-+MwL4Fb49RbFeZ$D}p(q69dj+)YDbV#(0*Mfeqp4Z-_NaNWGD zCh@R76PrRwP??|hqIhWA4^AR*?^FG6(+50&(9*u!l|}nf4*IosPD zvGk}v$K!mJ+mrRsmf-PNlc*0}VA}PKC+fXk3gd9#`{Kat((k>^ zi~h-rYz<304cbPI_p<*jV$vHOlfG~ck^$p&AR93a6LEUU`CqC3%U{9Rxddj+kULwr 
zxfvOmj*<1@LubOq1I)IuFsb9WO?YeOUoZ7B2Bo1K=zC(J%tsX-dKQ)F4v82OXnO-R zG%5n%cnh2-JrBgHX*p8JH)5)OI+AaeP}}ACn58lMFQ1guL(1^feq*5sKa?u;p}Wc| zL7`|4wi|f7nHzdu0jOC>U5VM^3+N^!grKnzs78|ErfvQ{A8j4Z@?H(3Y=9|xsLG{> zPQ$*UEUxb2E#>ZJw}d7_Nu%A&H4{^FXLkBj{rb1he|`D%yBB}_z4ORtMQ~^itmocR zGn})EUi-++n~46{v&%A7zFw_hyKpBZ5e6n}w%%2g3q*b0>-emV&|afk%(#fZF( z*XnwK?(`E}PRp*%AE@T*Q`ogeU>80qa0#~?6OHU+8rT}`XO#Sd(HT8Fk~@}s|B6rv zl>#$^)(3DbRYJ&wAV#eY+Hirz#LMDI6r#@vw$X$pPW5aJ!_P922B)-7##Ge0RLENchth~ha=$;4<13k)167qT3k ztpWl5YBWfb$O*Gpf`w?|*_!gbq$$Dnc=GHQG(0j!f1eRs8erBdB3xOX0DH2Xj+A7$_4KtdaSdW$;W5SZ=F8=HZoSoR$VCWJ2?GKth59MRRE) z6F-g<1RAEv*aF$V_7Je(wA{=TCpzkrQZ4u=h zF+|p@vET6QY!AB|7A72>W%c2fr0r@6tOR6FGpG}+ia0olbh8jwh`!Q)Nx-boj$*a7 zM>37qVUZ90Q{b(xo8Lmq0lD6x7GX>O)jitcM<&eSKg}*nqu6{&)MX|4HqnE;F?un%tno+CBE4GH5c?*+bF2XV-`Xu3YLqj73XHR zOaznVZmwt)G*e95@_MMY-#g}k2`#@EZ0xyG6^_AZ?hX};vv6Jo74^yW0OF#)mFx@z z2ZH3YnV0uC3;#p}*Nj@Aef#^$AX-CtAnYq#xW6bckf?ww1QH_3tFof-Q?HOCwY>Hy zQ-8rsp)if#XSXU7av!I=-o$Yf#y(JxDhvqa?|~vmDk7Ljh4L$KF-@z!{!US+Y$5-p z+h3=a3tjM7Dz#~dXkiA`>=egIBrexCx9gfVI6d}*wKy0t0+Y4$vkmzVDCWX+y~1=o z(Ty=il;mMjoiR0vX*6dV+*$yl<-%2kglhCbKLczlHlgrDUg7uy`k9H|r#9zkVS(^I zdEO`2H05npvJv3fx%5Ww;a9%T(T40~!$;lDf$4-&r)4?s&NmVTCHS{s33SX1i$Dwx zIgLTqhZ^f#?*(uC5^%FIn>RK*?iccVBdG0=GNeFNKsRXE4#HQi2ja_8R4Fp3lcHG9 z7GUp$U#NL5PRdaB$x~$E=tM#RPwDHs|3|Qkm7__Sj;^rAlGaY4=YpKFJ_APBwO$6QcpT zI*r!x11|de1Vt<-Qv1sDKqM;d21)_32c{J8);uiP3KwKbcYE&v`5<#+w2Tdx6#qfK z&AB|lD+{X>$cIYPgQqYWMO*+{1wdYrc!>(sq=?wKJnX@F?;{Tn6}SGe%fkU9cSIhN z%#SLC^H2bn9LOXI0+bwOoP`XF3{+vt4<`BmyRBw=*TLv3@we0J`;%vxyO;W=t{{3M zbSOytjkJG_dO?#O)2_+8No;RshhaZsh!zPDiVVvN##f*;#Ls*nWd&AHAY7e?l^616 zKh`3AGEt{jjZ>dzBec559JdqY0uNM=J-UELwJ?B3j6gTDaFqAFvPzrxN7rCFMRX5X zZQlO2^#Hbr0&allwyZ33k%qcj56hya*=CN=Ppk>gx3Au;HK?L#qpaKWq)=WN zR?~Xh>{}cG+uY6mDH|DB3N9bAA@xTJeYLuyC%8u`CMSw6W1#Fs&$RHCo0X|2F~MWX zuP45Q4?&x-QMnyTC6%d+E%3{L1yvi+??5*7K)GzPTZ23RSB~ccqL~G#+Vp@piI5b5 z8=r_+M&J&d1`6tAlSm?_q5_~H5r7?_gMQYKILvOZ2qZT9{t~o_vnR7}FiR&zjM}DC 
zvrf{Jz4><@$?L_9=3bG9?s*r0q*!8l?WP*aqVYgxqyuI+Eviz#5*J6_6hX;IhKTo3 zc(=*+XHkJ5ZB+h|<;Xp3w&yUf34-#)ZGU+h^(h=hE zHoyJZFeg3+NFSU6h|PJZb0k&a3eqB}LRikv6?nI&h2T-Y*6_(p?i8KyR#V|ac8W%U z3w8ODx^%i_;ab60(5)xwRCd3%I&ebn-Ze?UBgl-)^`=DCHh?u>Q-`t6+v@H#QsX!f zR!g&uFXqR#;CKXj^{)2Q+%hBMMM3J9&e08;H!wO&Wz(V^Tra-i8@GxC8 zR?YR@vBWU7k&Jj)^e#+M@QaTwka>R!E-nC@rJGPcf(Vt|ZlMa~Abkl?Y*qoKF$qKT z3guxyp3~Z#a=V%yyMyE8=PaPmzqD&wi{{L}Y38S6@fnp>I6~#!y9)gT(Pj@EK<)I~ zY?l@}tPn*Z?Q8ThvGpvc{e$6gnPV4A;O1jWR-=p?$jJ-<5+o&3ASZ($M#u~O#wL-$ zWA}9WZkz?a*r0rI2XSTNrG{da`FJP?3UyM)6*aNakwhYm!V%)^_<^WU7>H9XKe66gI96(Amt$wN;119J96 zMdAaTUCLsO&d)bXGsbL*Fz;6r@Y3eYV&;Rri`beG-iHIm-qUjU=^_YYx3C1G{&8kZ zYz;PhN4LZ4vt7>a48b58WavEW$7m%Op{Wg08xoXkRfHC_SuBy~c%NOdPd85A6&!Q$ z^RAdZ)UF_Y9K$wQ6W?8i4Um6*k~ec}_udt~7EMjg)Gp$@agMG!?hgH0#Pz!{5odR% z=!O%AxdNqt#F@W>oTCT~X`7&ogQ!$-UQ8nw*sHdA-JU)v-VTuXcKsI9dP>_{$aF~+ z{_xGW5yP6)&-i_?S_kd8b$YUEoUTuSwhu+<7yYNe==Z|0c&0lH$=W)r>_h*{mDzUD zLXvPuA8AZryWAArA6@R=h%mk*?|jTIP_VYGwyDw0QHMB={_0C6y2EgUq}OG8gxz$n zi8ddrd;ym+id`SIudpQ&<b#lQV0E#vsR^i zKY>{sF{})2*QxEf4~atkKDx121rm!Oa%JZ1U{&Ow1x9!$2AYr0+DhED*X%TgR!-RG&o< zRbCYPu3v$33WzQcj}henTHcXG>Ltx?Fw~p-UhLuQzrwThxh&aaRd(nMfYJV zd(c|+k9i0s1n(xWws1MAXgL11G=8y%-h7cBwygFqxcX{ftkKoR!^-Cl6&3Q}xXN zF4`L6f&F=qvrG@%!LpD;Adrp;@`yK$)ch2*#Q*@I($hy69s$35R@Y zj#Z^8`ex`SF)6fVS}o`0A&AIAkYKEcDNlz5;i0y}NX_r0O7aS2-bg{B?oU(Gpb06C z9uUtArGSw)?W}yM8C`45e}wigXkdQQ;p9QG#s__%VD$ayKA2eXlD|(cth{DzO)dG9 zW`zi8Ld&DeS2Rn~^%^Pc+q=JmzJ}S}c2zZ(lYLcDP--r88~nwfxXEP~XK8_|Q`Dq| ziATI0C}7EC1ZKRnNKt=~r64Jt;%FXX-E&=@Ls^lVHRb0INpb3&GOhfStGki9gcDls zME%o)Q|2|n`r7FLh<1|8UL@xeON^-XVu_@=V1-#nt7~+6M6WtImCdR{7 z_#x9_KHyN1Jzi!D!#E=SEx*~}h4@}zx4vS_XWk@YYF7_-*9s(mIF=zIm7|BNn|grB zlw}P3-=tcdBfI4ujWamO*!_HhpLff7X+$ktOR+I?i2!sTIjq@ti;g(x29c<7G!yM- z@0}@qJ6`&XrfirtJiH#?JM2~zFvo!cTTK$BL}GwFA|h}(g4-aXpar6|lL|h1JA=U6 zO2bc2x-yo|F&`(bet7_#1Iv2-_6le1RfDKqFVO4*0V9+|tkj@^(Y-hxKP@ll`t6=w zVO_{;v#!np=HD-L52OF20p~i80PO?C`h|0)z%)r^7Qp2K6uBOk`0IU!nr#$}J>=I-Bb&?2D`feR{g 
zx8Tt)^;LCunLZ+P{!8~PF738Ds-Cp#SK!jl?h<7L5CP5#IjTsJ;RSXu695Q@DhYFB z$wc6>khYBs;TzT_07=NQeaZ zXDX(ZF>9i?6Vw~1&V!&0tMljsGbGY0GU|Q#w1nzBV1g^>8kVccWZ z@YE5*>1;G1G3)LAptgDc*wveK~feKIGa&nV7Ht0kpGlb2Kb1U8P&5WHvC6Y(}-;w2B>Sm(N>XERAwB zgW-KJQX(kw%txPK5fEoWf*d_cgDMw&b^IVHgbHJpCTL}HuDLkRfZugXu6aE5h)sEs zI9~V)RoF?Qj~`%5LRBK#aEAh&T>_Qn(-?Ix#fI9ZojiOrH26cJq!Cc*s|nP|%{97i z5neWpfR<#B)S70b_Sh4^lFA)=XD<*d(M1PKbZwd5&-vCjTV`DZg!Xx6Yd-?6iTKq=XUKd&=7~ziaqQ*=DBVymU7=n&Q;;xt zN#L-T0k5RBooMykQNZe?R5iZOK&T>(w)&{~*CuPLo3(9jaGY6fNerwKqTf>YeYqBO ztzn!;uu{L#jgaZV&o%Gx7A0l-=Nm5U^=^)hVFVE|Uv?>@ae(rJ#$ZXHLb!~9S_QDV zrl}+bCiU=I%wDDp)QmG$J7Ol5{NmAhV8haH)L#kniqwn4D9DklO~4QzRV9F{^k2wD z#XgwhOMH2TsRc9_Z?1_&_fmXHHU1QZ_>9hyouG~jW>=j~GdnI+vMeP+#%N3yz;lWM zm^KlBYt%=J28F6A-=dI_;1R;c=5?1rY}M7_z3lW_EqxZ?*qa_KjtDMU%! zR>`hU?lDr4Yq6_qH0tS^Pb%y>>X&0FD7$$;o*s#5ndQACc5?u!6ef^{t~i$t=cr%m9V5z)tKFFoK?8u3Vc=~CdB zEU)1i3}Sw502lGW^$oIcBm#19<$bCtzEf|u;54F#U5kR6Cw5*{TU*^-P=4{xhwm@I zVY)&#hyXQ!_E9nv8ok-*R)O7X0n=^Yz?nkb{PwT&&8}Fp%&e6U!#3XsJ;T%(XkeJ1 zG5l2~RhC=Mo(J=xV9n2CaOAsQSwX>tz>sHX{pV(2SqB+vfJT!eMHALo^5KQ&a<|yb z$z$D}cI!20*9!1@B$@>wkkvTrL?j3U2{ zCZ&U;YI}{yk+$?r-3yrQO3`RxI(A~GIFB^)7Ey{Ter!DWREMi-Zb(yGKE2hpdfZ&^ zg)Co|Bniq5QwU^;-ae5J{8XL_xM^`vRUs;3f`A=4|37=@vgF8dr0Z9KadA{KE2#5+ z>0@@x$QD^LnW$+KIb0nxSYE3kRTWAF6fd)y*O=|BhO<6z&~GwNaz2kt0Ga8LK%yGO zzK{!2R3Wbso*o|VzyH5AF55kH$6pJgmr14|jW9MJh*C~ahN=ZB26={Ly;j|| z8QZsfHpfFU$08~E!8t;e-0Fo==XGZF%~DAhG=ucyU{D#KOwG^~tkZSnIyUrM>%YvS zYLn8ZBw<{{C0?tJ6X?U-G9w#bqi+|XdE=n29EMZr!>*yJIOefDTc@-$f_QFJ2;crZ zfaLE|thM?I(dV^5}YRDFoW+Fc0w?F^$ad~e-IwJ~@wXu9BJ#UTW z$x*bVhB6?-fF6_?TS@pp7WefJ*}~#|5&PF3{!9+gHU|)JD?HcWG1QHW z{$p?Chley{uKxOj0UchWK$`_gjba`~0nmnwE>3aVkRSd zaoY^_3>pS!R@|xua(}|UF{BdxHFLN!fZaEuJQ$BN{99*GhCjh&AUfy{wQThy*`hpThbT`^7$R#hi*wfc31{?c5=Y1Iy!EnW(6e!;sBJv zMQ#&tLlw=lI7Om2F0(ZA3R;OL(SOVA5M!=|0(`TeBd{rjO<4OZ(iYCsv@MQw`v9sb zmqljun5@j-uis>3Q;Vu$2>-t?fYD`0B<@by5*^#hhr8ErI~&{AKayn4b7fc9VvHL5qC-@(r>EId)?l!k?6HkYkp*9tmP+})6LWNY^vAH{ 
zsq8+caNQi>2VvV``C2+$6K0U~XXB}=MVbhj3?8s77YYXwC4dtBbncJ_j;k<3I%Cq} zeBiSgBlB1IGs{PF)((YV@keuCuZp2-_YUD9JI45u4}7ToVEtNlWKQhjR%-!Mn(}Uj zEc9s~QlF5-JiEn~RpBcxf}ki-fv(boKr?ensm;L>F$d>3^Mk;io_p__3f=-=bzMbb5PGh?lVnr6nLm`YShW*Y*=NX)IW5V`?wjZ*BHcWIUDx#Q(J+ z88hHQfR$BmaBZfhv;gEx%i>8sX!lmbpQ&;cl>=P?lC-4f9MhEQ!&v~ke;%YMdVG`0 z3wwg2?UQzWi^b@s2u4zBe|FnD7XqpM)P~OFoT^5yrSo=bZfzHO9B~Hmb`|K+6!3#z1TyPZ&Da{x`o~Y z|2f)Z{ee2i*M>5vlS=;b?pi1Y1R=NkN!2!<+lcF>h#@rowO+1=4bh%&B{DqSh{p(% zEbljB#3NUb@NM`G+K7FX4zE3klsqq{+r?!IUx+ds1zeGqq)W9--6);JB5QT7eFYWw zc-A@4s80XWh~3WZZXY0V9c(Tbys`EDrF>qyb^T(WGnAB}B%>#_nF1 zi@kYpWtsYnGRey#NWm7z1=!*go6IXOo5yA9`9%o9Tt2B+Ukq~s3a5>lu0G3J0bi-mE(v=w^N;zqM8{tKX=_p|uF~&s&3KB@!v3+Bx zo2mV?dYh+F3GNrj_q7Uckd$29E`L;sY2Sb#+MTOfw-0PCjD6K2k!irG%|y+Zr?>xo zL#=fJUe0}& zePobD(L6;WZtkUszSlvFdcsKG(Ew1nlHNyfwG+feQwzyma?Xh6*|?6h^O3ino3}nv zaQM@fKkW6EHta^JB}ntxPm3ZSHWoAk)<+e=76gmvMwq2c%wgc{v&TEZN$Qa2o9(eu zD&v{WBM)uim6^llZPaR&+8E~Ar&X>+AH2gkk3aSBDgeOQEQ0V&GY8;Dow)N9F`9W* z#iCeRI`vo^=SUXyHw)dX)7kjFhn<_xV~SB4)#!3Ka~JIciE%`t194*~N6D{DYu8Cf z6j{bS4rTZI3H@>Cz?iSJOBItQV=Kyp?(b(o@1I7$cE}}3LA2>gAL7pF|F(J>FEH`n z*0oFf}Etr>% zpO$q-LbaZhQT@E6O?+7&jjt1nM({a%=*~nqFtn;Yz@$Qj3|0aOSw&GElZ5yt8zOBPM-^H#2gJ?Gy5Fezk^=VQgXm*?5Bksi zRxR(O#b#(9C%xqG>_(o=n)BsZP zao{-_D!{sD!CH$#42Rv~qGaFLPVupwV`SMmo>R}@)3ogMCLi`FTl!)4^cOGd>XE*~ z?4cB}rJ1E!`9J^uAG>Li`c!fAhuK3Vot^TLvT?OY!Mti_xcd>Sm@gXP{ga1Qma7j= z{3P)M_{jXKCV16Pb+zp%H;#|ti^ps zR~Ct)KHQGMRPObQxBtmJP!+wo6?)Tk(~?CN)o_3SvfFx!+0E7nn?-+?KhmS67V6Fd zu&2FQ&n}w#utIdtzO?ZKUqv?Mbm`6BzLmC8bw-_ru3ILeQRGs-7q)9eV`y&<-MXkz zDk}U*Y-m>y=QWwWqd~xPo9gG+4@tHM3n>^2F&@_ttz(!9$(&3h=&m4Zn}{Qt>H)>7B1?siJSf; zDje8P!SE;8x11!4B*qK~Im0rXLv_Xf^}Qg-vJ~XdB(6q){w;R~$m6@k7N%F-9jBTG zrl^QQ-V>96QM-s~FT=)ET3*eqp*Vehe> z?NRd{T20qNrdzXlWNpa4%5GC$t=$YhL@%Z33>+uIg)t`W)F=q26>z?4k|FOAEUj?o zD*t=6F?>!pi)?&(P-x`mcGgp(g~2fs1A&|0b>8zxetd9;K`t`8Uaa`1AEbg2*G{zS z^n3SLEA%5l#Cq1uifQTWKCQ(qw$Gq#NK-9gFOC~#38Y`H$Pm+Mh6pS%WgL;^q_*Z% z)7ogw&Zn`K)r9KphY(oWO=Z`W$VYFcWmV(lDPXiQ>LDk0Rx`SOGpw#a$uK#NH1^C) 
z42_`Ov&^=?t1SS_D)u8cx7d%$FdiDM;rU@42QJx4K%gMb;pj#6t-@)QRTk6Mm>0lA zr!2gGA)Yqg>3j46s>NiL)n|v)J_a-kWS^MA8>grI#?rO-1+BXmgpREpeB8lamw9@!D873jUbkzsaFn5%rqVGtA|R~r@rwm`ixr1dm(oG?!7NsGij(4Y%rV_MuL zvp`G3WFK%V*%+^YHOHdX^>l5mIej) zqHWzNOR&{d1UW~LgzU2fF~%|gNobUl zSlqBw(typkvJHimi(r7ov77N#;ed?(?pJ%PDK}IGp{>Ua*mZ|geEb5`eNcrWx>CAx z&v&5z$?MqDEt=hx^ihQaz~`tDn}x z3Q7-1F$EUdyVZ*oiTAfZZ=X|8TiZDkX!>r>KtZtP&mLBHrK+#=cEak_hsM=0iryWD z$6aX*h3suQ8qHGXWk8@v`G#=ui@nF2feK>9WsKeKsmFz z1Pz~OY;Z{_lUb(o0ogbx1DrGnbLw^Ow8`JEb^p@ja?%t92EuGsSGu~Rj4gKQl151;t**+r#FbZl2Y%S2esxe^4Kb=FKuwJHaKpmG5Q& zB=8wbMx?bf1P5Xtd=ErgzUva5P6PCOyZ>-$P%AUIvA)^M{hBztXmDukZ&C5Sw#mtMCK(;%zR%F?%YhBa(?*r%y|f2cXFypRm!iCq zgwVR7Sno+M3!!3ERA`CSS>R8?o9*1tTC(69Lm1gX!W^^E#G=A-25^Nnq5TvT&rbN@ zi*H{at1mape-8w7V83(%aEGvCu$3xSixvO$OMJ~(w)VSwSzAl*9;x(l4^ETmLrw}I z=h^G;UR|i6-}iz0+kq8pnQt|&57sVp6uzBEOwl{oq_ zZ&(jEP0EPc0>i9#mYpGax#1ijXL7=pmv4(-xh4w%4d~C3oG^O1r%cQ2;#+x|_R>FE z@q)ge6sbcue@Uto2SHSn7BvK*BwHW`b$tr`!>6~scZXxF>1R0R65PqDuMcKljV}lRsfM^o78^G`{YEoi2%;YJAgkAYYVvl#-J6Y z-);8tdX>}640LgaIThZ=2U_3Y`qHFd1{9?>qx;?k{bth+)>wZgdUz`RDBVF(pR%X- zxiMM2w1fdKLR<$70d>DT$=R8}8cabD_7YOe0g6ExdL7n=_V-K&~*Cc1x1R;n{G48oBQbsD|vAq4JjWr5P;u4#HO zE~0I(5uT9MJ(M8T<$WgW&g@pSNs%O6+d38c4?mniH$cRQ@(nm}k&Mg{+vcBmUeF9% z(~Y0zvFgM*5|`XoLf$O+{-bpr*U{Bo07WvwCs+NC5K*5Hn7jieb!V_!5^FOKM@;50 zBQ;Y!lv`uilmYD-sk?WcT{QB2A_WKyx5TIt1hYG7B9=p~G2QD}BI`|chc>7Qdx?If zIbvtH>sfp5vTZT1hMRd1lGz7s?6=C8NM?-Ei z0!TWx68&bmrk{3bxzksj2!gUI2|duJQXB?wN{|qgb4p`D8TmovPitx3C$c;1&)w>- zOGmz5z(0*>YCS>h<=H_3CwQ0VS(g}`UKDr4lq7U8+CwFJ1+73vgR4OBK{=G;;XZ^#&{i68cX1ypL*2A`XM)^=sB=`xK zAZ6k%XV-KPf;KedRi?S)g6agF9n!cd>zb@~B3f%YlG++Yk=?H!(d+vM9iP>MoO;cc zN`z67_`@96WRfLYe0xuC+Y4lgQF*>Qv^?rzQ)`_+LQ&rz2es(w)0{jaffNzH3tH~$-DxgDWKJ+ zREj(dZVvK<8o|-XiC`hBCV{NKG@a3+yDOyw`kOYDNCkgP`?l?hvsGqSX~EqqO5S_- ziqEtw+ulsTPEFhS?`MX+b9ZBjut$7_{Yi(DktII$ZrFV~*e&EcLGL1aM3nSm3l*Mn`@@OP) zc45ai${%uK<%~)G)Ydw zO*FerGLj-82S6y(OP}ndDa~)FVY4o2{dv=QvZKN}L5n}m+7d7$yZrv` z&vFrKQMh@4o5u&_A8iF*hM*mT4r@~@H{M-gZz~a}ToW$#i?T8pDoPx?s7^g3Dwnmt 
zkxo76qx&76)5mLg?d~uQfAcFh5!Io&mn0Eg7RY%>j$OcD1>zo{V3J8qz+chFZjw(D z3D9mzf%8}x5Ltxg&qj{@53)_e1g-iwg60AWhS_ zuzcNB7o6F71m$J=PdFU*7F)*-1O!VnN7B2~zos(YNKq5EdJ8|5&nU_6B9 zkR61N3?(vzXv2j?7$vzIl%A^J7`WHMs2gBGWXBeFbl^AOyu~igIA&F$FM2?M{7ZW> z*9|Xct{Vw~Y{Me^q~vc8zPEF4=O)>0d60Y32$KO$!;{qncgIOBL28(WM}Rh>M+3G} z)I>?-duS-9L0Zhoy7}<4`-wN!rfJTrGpBvv5<0aS#D?&(Az2>?H-%Pd;?}74y2OTD*&J)}6~jMu1V`je z)QX_dA-MevJa##+9G`v`hyTi>Dn`+P4QZJBDZTu5Gd#q&&f_nBHAXo4%^#po*OACU z3sli=85OxFMK7j^@>eBIC;^a+G)fRUN&soNlSmg@@6i*Go-}cqz>D?!SMEJv#T-(^ zgr|u!nCTet9!ekPpaa<-eaL|*6xZH?n3vHGM4chMDRG_C0as>(lqF@EECNN4mQh^7 zrJYq{pyxvlgvZB$Oi>&B6b$jK9>i{i%7ciQ1}x~#+{d3YQcb}JTgiswU%hNajvb@QG_O>k(9KM@skKc4@Xt3CO z&xF1M$?d+`#DYa6PH5`%W)vlF)YPxV&;}V)IyfMA9cEA&MCD&znTt)`rC9GjG@w^+ z|5K*9qndai8ewLK=aZOipI@1#<>(Ar3RK!Dh*yBsg9@Z8eBbH3f;h3r6345isna$C zu^0r!PqfX3rR6u&#$8QR&)rptJ{ma%S_o*Rbx?awr{4IPAWk-~o@$89fSoK`bG7MR%pL_oX06%S$Y zK<)EdJdip%_2`wM(Q{sSv_`NP!sQ+%{L4);QtNS2)sqk$UkNEz!=1K892^U{J|uMG z=eN}(e!pRNFR8+=9t@3>>}=ou{Ljbbz4@(eS-ho@Wn}>M!y0lk42IHdI}A@O8C$M2 z9MB;1{5s5p!V>BQ*rd_hbv$XC@Iv|(%dm1Mszn%`FH+@(BjMP*HN-7_T%l>2J`>~J z3An8+f$J;V6AhTIOfpC%i|IO0SIuhBnh_5UKX*Y^t^L zRBAcVMiv#IziL02!*)S#z9KM5ed)@~!Weu&nw0hFAOOLs0X;c++ZF3zB{n-T?Wexr z7ASe#J=R$P4@iR)*&VjSqU<~qSP~cdmr<0(lxsq^-zjQFsW9?+ zP`M^C#r+_eBp0{Gg%?6o5d1up2cO^tQVk9|SMudKzV0?FqBj;@ z$W3$5;7jj+;@0|W?-Oq>a_}Q>?=z*e8?h}7V}LpI;ltnWBMq6O*)w-b$OqFfb^Lfz zEZT#AHIx37tcK3zo-3>IflYevBc3|xlRo1p5Y+`}lWB}&cY&()JS*8}#Cd_{HJS{g zV#57Q!+u5)-^*?#TJI;SyhS)6Qw6?U!9UWG`t5fDu=~F+UcLRfyi!HD6N1G}WQU4q$A`CT4Gw z=55brk0bcziS}p?d-nM8moY(ems73JoHWgrLWEh9ysYMa=wb`rlvHF z@@f*Cqyrf2h8yJUkRscra2C#Dnv;sPk3!DoL&PHQzWCib01T#9n0{fRjV#7XALob>_r^1ES>0RDffY3(ol(PbjL0PH6fn-^vT^Fa`RHdf?bT-&C(pUO{DFCQ%EBoStH)zVnlghjj@$vQswt{Z ze#JgAc$XNx|MthjpQ*!e@*>@{ph@Yzm7)gI9uE-Ihbv%Z6QO$q;~`JjVL_#rGIw6A zx$KVzZLO|*bsX89(4l*kc=9hzDouXoau(3Y+sGuG3B&TsF53st2Tw4R26){L@7w>< z<9pQ|(4kN0jOXwtG9XzP71_L`dLT4=5bims4U}|-`B~ulVKxbZGPd;WEn;%QF3}hP zUcgj|f3t)}aPhul35POgi7M)5eDjcU=W{ppiP520B+8)quM9la*|_jQ8%|kLCd~Kb 
zcsPp0zkJi_pAh*Y#stGt!v=n^QTLtsJ7 z>#)1>$yw3oD5?awDle)NnjRp<{?j(U~tr_a4H%u>>}OUhw=Q1c1>8GFSS zK*y%`F?+ld1XyR$IHqf4SyS2p;t7o`S^FY`wWO60Djx6GLqL6C=_PHixxb`Z1e#gt z+|;rpbZ@HG1y5@|jtWYL77<-TfByB^#&Znhqw?lW5v+;F2 z>$&HI5PVoBwR6HpATUqRHfs39Meobl<(vCrNEHb~ypDGEX0jLZDprr+r3!k9T7i|n z1Cgc!;rE#EKK*MbMh}_t=!$dY+oI>Lft#n-R_>`#n8pvl-%z{d==YHLsYv|zg)@iE z!%GOP!!RFd3$Ze;Y;E}18fiD%%bDk%bHjfw+Z`^OT_@MGNKNUzv!`x)T(NiW->L+Q~z{& z=usm%5HP>H+p2M1!!^~jFx+alCBCVfE2@6H&+gbD9`sNLQqco{Y97qukraP!;Zzj7 zMB`f!?m--fv^{&Ik4kJ4fo3NwCh150d9|FP7v*aPkcOpCCz)Smu;0_F_gL6L>sPDW z+h#jm^o~3cS@IN~kD-TZUS1-mhZW)!w6*G$%N#Si*dCzv==vJN=}WW1j}yoaa+C&` zD&z&StnlVO+N2d+!!U(Taw1=3B4c}Mi@K(MeYS}^VIi5a?P`u0CRrXHWgnBHWzxrT zxO~6+1)@Dy?WvnF6cz)EIzv0aO$5C)V)bEjBeh-LJ(K!$Ek`?0x5Z&V`LHhebSntK zQlCYo0=@_q$V+Q)Qs%c~4|lZ89u*1nb>+Z&PuiZRP5)DSliOA4ur179R(EFjT0S2m zNYe(SH_PhoGGE=4kHsY#Yna64EZb~E@<#cAj`3J$KR$_yk+?t)}- z&@zY8iAsR$q5?Rnv-Hm<4P(Fx*B_PS$OmBtGJ29^BSgJMXZWEN=(~g@e7VjmI?|B~ zN8nL?+THVpKf?U%R=IrMK?qEzLJ%~qDe)u)b)6z}_}@w2cr!vr_X)qYFMF_qU_scd zhT_wWyF20o%=k(Rd{z`BqtWsd`WgghYSd=#VX|N`s8L(A>>JvfDznhvIqigHM7ABGrcEHse}9`bya3q1wbtKs|=~B0u&i)zc9ZhA>dv3=ph-YpgNkwJpY&F z-L>f5uNQ5u>`4MfE^xLp;t)>=`(Lfr(vmL;bo6WA{B*q(c$WBITrYG$_ZZab?ykDA z+S2arXWO?wlHdH(_6D!F*JX#N>-ukgTKw>X%CY;VPTd;&x7ClErO+y_v%BRcW0^3l zioJ?}GF$z;$R)O?UrM-nmtDX8uLm7V>Ju&NPy5t5S>UVmB_$lB3d?w2r!K`?7na5} zqSh0qQB@=83Ejmc^ZPnm%gh}MN7~eSpJ2-gpJsDMYadf8HbOP_c+7s>K%+)nXN2bFySM+nG2~@0 z+HcgJb=rNvqW&WyLhIRG22+xe0+Um;Hdph3;yL7Jt8KLx760`n%SB?$u&7xIep^!- znRru^vyP4D(A`)-^B%&9CwV^h5ReYdOMp9Y)Br>*)ok20pXobD2YrHH@RL7H@DpxX zC;r^0-)7FnyqsrMNaX0zyjM^Zf+T2Y(Fa)DTT5t|_nzoU?TdFKZfH0E55tQN0`{*T zZ#K(0e}qkB*~Wzy$k=<$E(|{m)ra0~)xEA* z^oLj7kAaTi^wC)s7gbQ;lyVPtUAH8?ks~%$A#(=YD+I22BIfT^MV*jvsljxO6-P97 zQ}fA$BL(btt{c)Aykvh>H2cZ>)dL`3cK^+8bJUcloD7`goM{5-M(jur}-pgp9f`8A|IaFIa-IqIB>qZBM8rbprheg#l z7-4|lb(-5_s@~7L?ZbPs!gs8aootQY$d|DDz%GQf^>(#^LKrlJXtm#GH@6f|n$5`N z41jxfIlFnhll+zJy$n71<*YE96`{W_`I+h(=UA#Oow;s`HOw`z{Gn-GY3<;evU}i> z=OfmsJ)kR)%s(of-ntZ 
zR0}H@ZAChcPG-{2$hX9a=hNi~`g@AC{Iy!$KhU{+JpcSlB8SfBdnZnjHPd6lKWnU@ z0@KEazJ$D!;WVl6i%oSWY5@6av!PmHiqC&gAN_Zb@*9ou*!o4PbP4Nu7KVL1r1Jd5ZKvO5E7UHVsRD2EamnviIk zeIOMnS&Cqu1T}eIR(U{T3GH`|Q|35vU?h_oHor#I;rKTTWMAkyy2{kih|<8Zp!jU9 zH{W7`21082Smm9Le4@M42Q#p~UF0Mf_H-1Q`h71R8|1N1*9ufs!Ccum6W=bO#*MfvFRV$z)Bcxj+ z4)as~HqV;gJO*e9ZWP`l)YTv0m&+(IKov>f89tw#%WdCQ^K+@;$VR9P`p(!5Eu9$pazlsNr&=&~R52Bo9{!Cs_?A#X zd0@-Y1wD&U%oTIAElFKPDgRZ#K?|BE&U|WvPB96~|26FqZ$KL!-#;~K#se(HSP#$y z;MlVi!ru+ovl|StgLeVke6vN@kmTPEFgqw92g_Q_I2uDEwcUEiBk2*A|k(tZuyzogu z&WkjF&5<@kGBHlh+7wrHxCuhY(5ACBle`*g4)C)=kK)CZ?`Cyo=^7%1 zQxZeaO|n@p{`l&fufJYwXY*lsjgrCfB&>xR|2=)~XDJCk;nN>Y#ZWS?S7=~lsH}r6 zeN-^KZok-p;&C%214C-&hsO0a-bRZ4x?tSCi#Y<{q2`jUpL#^S3Df~g?G~Y7zz-6i zp5!XdsH3Is+^4~Wb^=&%qmj&jeG}3==fb#Kyz)_ir#KN|qD=XhQNSJe6No6Or+oxI zrME%pcs1op`Y$}3qac(w9{v@LpNgznHuu2`%>$`+CLz&c0Y)e+XaaRL^3qo0{1@YR zC??~G*lkqHYm)AZ{9z>v&!R1-9Y^G9KjEIU==qC5z?Hj*rXeWJpOX`8!X}?6;c}q~d-adx~PNK57_@4HKwZ~BhlXj${NVfv?2LOVh1)Apx z+vd>mQC8F*`i`?8CE~S?=6%ctAxbOjWaV)gd5CNOT2BnLwf& z(k1EURT;S>Nwj{bWC)9fOTcNIolVZvLS9W{(I`n<`zFq@SNI@odQ%U(dl+dAKYjcEn6jo!G&*Hhv06kMJCl1T^U?=9 z^sq_=0>~%ijzKQ)x&SJ_I?GEh^GlCTuPOP24p(*cC@n3L<)-bjMI4-p|I~r!C%_vs z-S#vzl)~cT;c@*&K|^i&sLc!*(_sp>olb`dN*V;}?lwBW)0>sj1uW*~Qr9Y^u7wq) zG^e9cm3VF%R%M(_T9U5>AF{yfl68AH=92=gl;*vs+{Jhw*8yJO_wRs;o5l6b_rMIV zZwA)H*y>#@lLIVs+tsGlIhniNuewV*P-$vOV04~98Ja!=eOt0*IV{wbP|?P5JkKM< zaQP>uwps>$ISGB%*clmRs2bJ;eKZUowKkhh!id`KlW@P&ScE@rOQZW-S(-a$X}#&0 zU9kTEo-f;;da1KzW=08xJ2PrALDjRcH$n4g+62`ZXe6a>Tqkp&PXHjHKSn*ur`gqw zplfkqO{3lWH2Jat(B`ax*cacv{?v977AWR!yxWe6zh@8S z^3GD)*;ajC?9bS?|5P_K@VRvNUcd#GEvXDjEAZt#y9aV@+sw^aACSO1ciVS)icu3N zJTx7J-ph4ScMj^32Hp}T2siP{#Hqv5_a?RCylx)sIMGO?p$1J=@J#T_53-r;gV+t= z;D%6!W-{Kvgtyb)vJsIBG;LTzM>A#}S8k*c4@N>l9#-#K_0buHYrvrFQxnqHK?3Du z3QlHTR&k!i*mIan+P40U{tvj-CLqS2oA3XGwBN_LQB{H8cI1b%Jvt+)UzYGAIrF*_ zmlXu4EerLnQ$${zK>!iOlgP`ynL>;dX|4zVoTZ6raoH!6k;os1U9t?Y?mLi~X-N*r 
z+~_8E>(pIET13S761l1JCHi0y&Vz2I0{nu!Ku~6I)|T*~WtmfyPKd%vh7TMLvX3Gr*RIe2m33%RVsdw7Z~9=WR`j_zXexW)F@ns2c(;VQos>rG!|LIoTr(xsf1ODl>}ZkI z*PhIKPh{Pn-h_`A@1QfK97mp}v*(v1hhY2m-Fwm^N6J<2M9;n6tTziv zCPQ90&$nzeJM`RyGqyRhJ-&*a?5AG&HgDIm#qS?e*&$Cl{99ho(d^N1=SfFLU?^X) zKDgwApop>p9nldGpcS(5@!R86D?6dHMorA1!vp(y2mJt8A(Px8W02OYS^)qK2oqmjv_>s{Oz@C5*7yL+s&0*Z#Fmbg2tMeWV@5Iw4m zv`%l@BJ@1~x3@p9i={Yc8KM(;*uUI5lG(1N!y@? zD{_(DJl-zvmXEhbGe)s4SdVp6(o?u;nDF%Mo8KOP>`m|q8zB`%KV9*m8G+eu1cr8S zTNyz9O6{c?2^C@Nt?y;iiLt+~whiGc9sm8xjaAKd|18=@^wCaQ zCIJ02IK3*&V#xld5&ge56x1fOSZw>sj@n5>tI}`b8_RG1qWrR>eSGa}_S)wxJtDOJ zFbb+i#6(snckJDpq3zvL`C{91snFC7b-DIldims8wXx`55v7Ma^ZTZ z+S(Bo*$g`7o(ta%<4!837BVy@KoF&&Gb?UY>7i?5+b2?sT*vO)yX-IBa2A`356`26 zIAf~}vJ)wL(Wt3KPAHwbwO?0dQ1jo@;#*}|JMrmvv#fMPspL1y+Wa47Sr>b8Y0T-k zHMN$qKkI;{M0K9foy68Bha(sOTO~@;1u!-$T|PbN`7}%U8!F7lw?V$%295E@ebScj z!yC0NVrX=#pFtgd+SY--a(%VMA=t&6$(?lkqK{7KeP!=UaxG6~Ey4s?f8s(VwbmP z#v9P2iW-wC2o4G}0)|Q0`xn3Udm=t|XNaVm7!=6x=@cU`9M)gEeh=Qd!&M+_vSdw&% zElHu^b_`Lep|@iXT7f+fx&p!kd&jFpfr%)>YDa11!hZ$>V-J&z*wev{2ed+nmpO)L zO*9045>P1vSC&*1~jP0*YhvUD(FW-c7P;gc+UH)lE1B@Al07 z1_%5N4r&V4Fu;WwD?oE@|4yKP+CzU^hsXpeXQAa`S9}mxTNjvkI!V_*O>X}y7X~-B z+i`mJX~~XT`Is9#DBpSIEvo1uNQ%k7q_r4gIbw7V(_C~97~#Spu)w3?@WYtc#@IaO z=XcC^aNfnj4xT>+-@*CS<5mxPp=qukL1FWq3A^e%d%Q8*e$&330%5H8qTAi^%r-Q}C|O`}<|i-!xHwjV(d zrb&cStq$)uuw7mMA-4WoUDlpdp2xgA2|>u* zYlmRRhunFmjd%dlpdDz#BynpH^+JpWu&n_%USVf|o_5e|5}VEtV>qfE z#(h-M!*RQcI-W2J9biws2xdEBjmo}=lhxQvbaHl!IztxWd<#LVgM*nJj!jS(jt2qF z4MT*g(}}S8$Jln-5z3k1vI8vIFRBECo*>;SKtZwzVp%oPnj(^dmv>Ri$_Kj$F+^&z zz_7Pg?>^s;>aBtq0AU4u&X&3XIYjhLb(F~tQc1WrLCAfnP-CEz7WG| z+_5E9wbd-FPhm-1*=NI6vpepOP{ayA=+y}am~ij+M#x^{79_b_aTs_}GfCZ@E6Bo6 z);Hz`=PD5oc3^zmM>b7-(m|V9Tw3{4#=}!$m}07FXzc zZ%$JC=5-(bG+HoKy}&{ms9F5^G^cHB1Q&s|!BYByq|eX~TYQG)*3fYuwITC-5w(U0 zY5+R~TqGP{w3DFSMp>USK=Ci2-KvNqHmz zU;~^y7sCjlXKy&_69-!xIuSl^>6IWQaIWbg&v_ulN@qdcqd9Kr!owIF-z~D05TdU`XTD#14tvz*e0sdMwii3f}kSkmo{zXosGL?Hc^zc-#%! 
zkRc;U9EwU}BwJ1J?u_E7pAdXGsKm#v>>az;1>T=)FfY1kz8YtjK&OJ^YslxxhT#4} z&i2?mw+wB1h~sSyY5G8JNAs`89ZXoYn(;*o2@<7CV1WmL;6*d&w+Fqb8{qu#*_HsU z|Cma-=n8M$`o5nwsWVO(wp;pfsZ?UYy@aC`9$SRG@*S%CEGpty(YJrQPA-;{ zbZ+{>?!9Z9bL064xmaV1{O3erL))w%h{CoH$~Q6gi7xV{Uf65zV{CI9SI+ z=ovuoyahg7ALso5wjk8!sge`U=V17eeN$K5j6I7dSC4P2Dq+3zKV<2(u3%;}=a(j| z>8Pb2Kr3`k@TP3E>6a_76eOC0u&LNDb5{`kpAuJ~ZPs)TNp!b;Q0BjukMyD6+DoGF zVl)8C1N`Oli(0n)>M6k*nR z7{);QHxr!0gy-%goiQK|(0?vce%6`l-iNWEJ@n8Dpr8wYe~nU=MnNoess@PL!qaS# zV%bc0Fdk-0CzV|zc7)v=$*5q6%;a>Y#~1Tsxe9=ClAcNS{U{`S_ePK9>?g7TpIHs# z-M6qKPx}q?U&*L!XAr#q^vjk_fn7ZW@^%m*7H_v9^^Eb4;YE}H?F2F5FiF6_8}_|w zg~AoWt5N0weX~$!6~JqM-Y`S`@}07Rc#ZH*xc~Q}wAuK?D*3zu>d7ol$1Bpbi{?x7 zXlh#+S@B(TG%WF7=(Jl*Yy6nicjPBG-vho(()L?(M`|++_wHV~Bk`;I<95{VV2~{8 zk%mu4Fz{j9Ic~Cybh2;n8MK5kdyk6Zs4Hf!)E81x_iVRbGIo-=_{`NHUAh zZXZJB8D7D{8w+q!_z+$?$wE-q#AeOy@0DGIt%hv`@R0mYEL#nqGz;++y&-0myEL`j z2M+`AJ6(vhC2>0jOI8qiO)dXG3KxW@8!t%BiUR3La=rGD_H6T!Gg(x;|1wp5gUsUX z&DD98k{FsUHut2Q^%DmMOn7tkF}Y@Kd=OVyk1Y}kBarBSfRz;Q;yYkRtuI2qN7Gs4 z3*CN;zy=h3(`CDewN$%*=8rAjYp0tsuM4|_Znq219ECJ4K%5Q{AVe44!4PT(y;ctu zEb}u~xEToN;hI>a=ss@VjcXU<30|E%ZWZ9bf+udhKs}9oW&erD8eMZajm%NGj@>7> z=5D;0i?=`r=(6z~m_JMhOIq0i^-*+qNFGmIU-;H8ZMhirYTxu!E+4x79?1)#`Mcej{?iGf3atO)WX#vkA3w`t7|V-GyraR!S@QX0!~k zZBNYFCu~tRqb-7T5;WXMCMZgLpjMQPP}Y<+(X4oVj=nod~rys{w}YdnyI zqh$Eg*ht!zcD*vpkFy1&?73|*C^>ot@;Dl>05}9QlF)gZg1^GL!ytFtoozYm==0?B z3ltB;o8o2fb^%4+*3rfNGuPrD1Dz-e0suMpNG_w304XxSIdu_r$MCA4+aHF0jhjmG z5NEbJQcw8=;~@~UnDLNhG?%hJB_5iF*WogqokJJ{lUgjRzARv+B6ktL$34aTn@+RU z1^Ex6<}tjb6JUC32qqw+i@=x-pd<01U}nmSh8J6rcl=E1O+1Efoi`p`7+u)VWoOi- zFpBw_$R_6BbwQQh?BLKCH^-<1c86#W^s(9@_!I#U-5$mxuk>8Lvuqs^?W`;1xpUs& zBgPoka1y(`FihSX0^J|yjFzWVeqr9+qzk#C->@4uN9f)l3dCSyCnVwD79{Vx;|QZI z5y*ksUc3o{s}7lJ82{?7-|{x(ahm9-RYCzx*YY##INBI0i87EZ{k-#CuK|3r05b59 zk>~@L;zIUHE^P@#egiz0F#iVV-%$^mj$jIhx*oDktnn6TH=y3u8wcQUOZ>>{-%G3! 
z)o4d4i2*@=-b1K}Clw%&K8^?7Tf&*lSe$4hFV#0lzyDVZj z#n~?Ij`P4Z5YyM#t=jY#P{=J}|0Kq?qi>XZ;BfkqlunvkP zs+*6ZcGUKoGr>NO8FUYRilK!`sktAOlJAU(8`}BqCR|J*K!|My6KcPFHxCyfJ~aY_ zW}+4xkkQD3TD z$&IGzgu9Uiq++_!3!9mDzYNk5{uVAF^=CST#d*XnS)7M`mdKw5eTX>pMiDTQqo{cS z*lZh0g1zpb*Nc16fCW|7s04%K=)p=BiEzFowog}l67+e1BI%N=jdvf&ja;MG|N=@{fgMu+b4acfZ%dG1zPNFTkolx@Zmh&0Z9S zJ;>SEV`FPQeeM&ijC4|GP(CXsV3FfUEU!Jyg1(bF7Z7$3aq(3*l)%mwzBR;SbL z0d(zm5)pP&a3b%R*_@k6hX*v>0w%Jf;%)b3n=6qr{UGc`P1c>gERb9z2=Scp5W;Re zQK2uCl*zkqY`j?E6vyVUNJpeUB+*5`|2+$XEO)~)n4rVrS_!kHJLFpizZ%n(!5FM* z7fBz*IUsMr{?H5tKm{aGrx}2@u+7>s)`N@CpyJp9t*_6a)#3fx?!Do6`F8EG8S_bMfC~vkaEBo5hqba;&4e;@$dPW;<_QWZQz&9 zzuVm;J6|kUcakQx3&$66cMMwGc4vh1B^-eNFop;oM#y>tuWdtdwqyqAITT);p^OxW z(P^)0V&<=sP1i(=tGqj4yU@wVMQ%*mPMZTkny1u(t~0<45-@OXJV}6l;Ib>YQif&- z$xfWpP!VOPGEV|i|7Ke<--PW}`H3`Op$qYK%G>9m3%|M##YXE&%5JX;dBkIDoKpcs@tqanuHk56^A{`}OUD%y_f^S7E$tSlo z8t09(!`(7ougsVnBn{-Jhq~qFwQ1s!rUw~bCho@KWvo&Zp;|qPLCA*w2hLYWsRL_^ zKLX@)qh!=WF6WiM>E|n`=e{h7p>^gApW&Y7C%~%?1~Hty;kW~ST(G-c zge_17Li8ZU7)pXcC9jlXx7U5h1ZV-#hM+#VPFH@!Cnuyn`*_?jmw8PWCVQn#d}a6M z!~q)?zPg=H?Y^IHd0cX(A2-{>KG>i};H?9UHNM(9OG|5q&tYc+U~}d=R0@# zO>cMxWJ3*%97qtK6S^$N#Sj7oKa@d>y)TDE{{@*KL`N7PHtqtRI&6ht@9(3AJ_ykY z0r2pm-|PoTD{QrgohqXRkK9+x5xJ5h&*1_7vj+X{AnbOBeH0Xl^_&Rz!){1q5MXy} zj{6v2^;^b?gx<-X=}kHw`^a@zhWon(h{e;U71{8PjGp}%MkM|%^>>8o4JGPQX9xNG zO}IukLJEek_q}0GVdMTN62}w#A5$KM0r_&>e9dbPI~rO`3T`Q3|6+K~ONSZ|#r% z{N#CpC*=M;kM~kHgg?BQ+QGOXdZ-s9Pffa9ViG#I2#4Kq8x(zAAT&(h`Ix`hE5UxV ztf1ns-77@%pW3gie2Sb3GpgoaPqX!G6{fTQh1paVkiP|E+&I1c+W4R5$G&g8`qP^? 
ze=`62^(7w(4;!P~uVbifGc~0RUO`Jx8_!qSdL=6^>~v)#r}#lO?uD;iCA1^y^%Y1@ zixJikNDBEQ398m~9s3!4D=U#G-@2PsoMEd`z83rqB{z?6W$klFEXmz*^U=*Y#_s7Y zri;^y9EfY_WZ~PHKfTHpBXn^@JUmY2K!f|y**I>_e1c6qS5y*DS!O-9n_bF6ukl8t zwdiEGy#jU$=Y7zUPhGZK=LA@__$7luUABS%7k8M|UV{t4{DLh2T8cVIVX57X{8QtG zl##(RgY_raX7VB*$$43xEnp?+w;B8ZZ)*8vwz?q%D|%TQpC~X8XTK|=`pez2=zEy2 z>jUVHPcvf|1aXe9VXt8etrjvGn=DEB7Brn`r57#Y%l-h9wta9VU-DKD4&30p(dp42 zefBc`H}{`_uZj)yTfe^%=Hp%KgP-==!@_~4nD)>gxn#L<%>>YcNZT%+flC z2C>BmY!Pia1D5(eSU3%hwS^bLlj{KQsmIsd>$m(s*>U>*nxenCjoZ!}*EBeD-iWdp z#ud?qwnD7(_HQ2-EIhzx*s7odZ45DUfhM(B znw<=9!=>2j2;H)lQ>F!0$Ir1N;bk}*C5w#Cda@ui{z93FSbmy0@Ai14k3!&Ny+~KQ zirK~<(QdIl;`LkZtv`j6+${jK;^7n!msnl4^Qqs(s$&m_pI|@xfiPTR$Jvs~H|Ad^ zxBriqQ}J=OhPNLFmI`dtw_lN{;mJYhVZlzkes5WPL^6b_5dsS{8^7lZ>1WLBaJEf+ zGdpzX@X9`ap$XAv;lD!QfUPYZEQ{M zgyuMiQ^rFuC@&n#lQB71q}xK{~uZJZO1wY+b(D96vaTi1P)Oh8z$Nz~A#l zX+IP&zBq@Zs(5dVoa(jg@XJ-ajtQ{>9vSfg%gvS@LlmWosGfh0FW5_DJ~BLxZ~qhJ zJbgIL@(o4Elwn{bunF)Kr>n~X+z>XiF$%5{=r)1zI%2_$Fs%^LHUA3D`mI(Eix&}Kjz`(nJv1M1mC-V z@|8a+gcOwUA$g%XH60ZTR##P2A)v8yBk1v6RiXuIO1AbJKHGlE=ClZvQ9=^U`;ypj zQ)fGGvt}z8{X@Kz=ifGxcQsCNw z#q}zqbBTK!zizlQnCmj3ucQwiw76+sf%P_7h-#+nFTMb!dl+H<Ntpk% z;|P40w`fIQcC!Jo!8Z!|(yI*r(Gvz(k#9Hb8TQ$C>Qx-}i#RIoIT%m|>X}dyc)K}s zot$a($^Z9_clSyh?x{?&``y86us{hmCRJus$s4b~fB&aH zl(=-hz1M8&J@3=$$Cfh8{msl*(yelvoC?Nj=e}41O%SquX7lnEN>rZGSyWJEd636) zl6s5!PXI598D?>)^DJ5JhV+EnGVGq~|4@Z@+=6<_#Wy6dg*Jp4K!8rl9YFN?iPbMC zRlfxN0f42m^%%==y^!f4H`I0+e1y;w&8uQT5`25T!bg^IN{t@Pj1{WcxD<_tYe~b2 z?_6XflJ^p72`hBohydFfDThS;K(~P|CSXE7h7rXSt)AR3sC|B0hLLJ$nqiJbjOjzo z@h>uzkAS|+CW2G}L~X>=K;8gHl3wG8XAYruU?dl2+TOt!EKl2Cg>Y5$7T2-*uQ~0( z*(FLedFxcYt~7{J9A60Dc*luyqX2-X>_wTUEjjPU1X4)d~DxlGO~CDaIJaZmXJju6f~rod05G z=LLnSyH3Udel?SEVKu#2;NG!%ymj0!=uVwL^kg`az1E8O(eQmW3P=48W064ln~p0j z;YrxxPTx7O|?QD$oo-)nrN($&CGK8LlUx>W|$zAIk^?hgvv*L4ZX2^ zCj+whhAQ+p%YUTdo6$f1`tN@x(z?d;pMU(bRY2fn2;T$}&ys!be!^J|d(3vRkj09* zGejVSVI*!l_t9%H(6Asp-bI5v#(uFihu1daTR6c^$9Dpw}zJ0CNl?L#HVY=wkFEnR@skTjNa 
zKJmYV3ra|LH4HL^(~pKc9X*+zpEIp^nce<3W=SMl@qCVhkELX?OBaEXhqnJ6eVcsl ziiyFuZ7dH!o9vu^n_gE^AF0w%uESMPo9v@DplyQ#EOcI{-JGF(>)W27N8SAL^-Zw0 z8PRYU#al_fx!D0Rinc@^8ycltn8F1{3#eG@9U6CHF2Am@T*}kx0L5spKKbX7wj(g|Y4OA=IDp5y%~7LTUZ!6$YP^nb6pj zK-A6D-bP6X&?h5Gm!kojS0hw^MvED}A0NCEQ{cH;VOfRdN>|yWE^X-`v^`~I%VQ@tm`qCez1?CY<0DqPRtIp zN5p?RH;b!3&;Pzd|zahG09hBH%JLM zETQ9X580HS3~e#AGsXSp0|?%--XvIJAJ%LAu+~pn{$B^vhY4gOu0Wb&NBxHW;28+` zcyJ?N=Q$E|phl%_&nQpn075{$zqa5Fka2#}hx{vse^eYBa>LdIEKsMYC(pJM(_5tV ze325%<_Jo67}Wu*4iNk)#stXOtk8qEvhkfvh))Z1Xy62+Q6Cfz@W1gDiwqM|GaOC} z1)8AD;oN@xZJo{qIkrwNrG7TN)zy-|B=32gjq^eAZE6%QBCgesY_vqX6 z_fl8i{^`B!|6aPhEZ>%Dxi!I!9|LSO%05HkD>BEmr0puUh`T(PZ+GfsHhC-tACuMf zb6VQ2GFaC(FIRBJ-c`B(s4v9?@}^hZw1HOk^KH{(29E(B3nn*C4|A>tDXWH3xHpN{ z#WSK1dYu8@Cy<=GhrvwBjtS*l{T99akMF$DTL7$9Ku{g#^3EwoD*T&cMxqmNKfUSN za7?d4$@|KR{lJ}T{rf2SNV*+r#b50$$&%Q%4}Vzd`D9v;DK~|~yiLmtg1UD!Y zj|DlXdc|hy@13|Jk41?=lp19lWZga@*W#q8&gvuY(CpI91(r@MP{sG~@E%jhe3ul6 z@wz9v1Q$sSc}W3_)P2vI8eJ5WQx!C0I{S;s#3x5ZHk~Vg{e&ckN70*4PJx}~*ppM= zgW{QFkLX5B6ov56Ek8AgmqDB2IHjua9Nw&v=9#*9{ibC^Ex$)svk#v&BPtFaq7_Xzf$_;msif&Dr&PRmFPS#@iS~_T zRAgQ~Sq&uRURBc$EcMF*R`ja#Ehm$N5-K;nvY1Rhd{vWKh~&Ek6_tbe1NP%N^ckez z7K5SFL`pKt#th?YLaK=zOFdk`FF^c3UlvzUGBKxvS|HqT9yvWuxzMh&?NAeUm|EFA z0&nSZ#i$}Z2pj_NahlQ2olRcVF|+n=maMUHOI0w+w*a)VMV&f(q{elHra`Qswhed1 z92h*Ny?W#Y*k zi-NX!waFg)+tu=Be3gA!nlVI2Sgqn4C~6j*DNmqKjG?jx;`t2!(GXQIs2k{W z$K$yN#TRDT>Ldz?tFbxy0_q<3_RK3-y<^dGS@?YR2V^UKFHLNiB+^u2jk^WMj>p3J zbsB3QSc&De1V>^^dxWoLKF(q@UUw275(>*w#MrB#X*&)SHosc3pAE~Z#*oWMb2NR( z(`XjwybehQRZ>nm0+CEJko91FmbgfrueZh) zL4}FmSJ@J%#9d*ko>`z%0ASpF*&+?Hh!IZX4Y1{;+QPxe7O-)FpouDao#CU5!a9V$CcXhJL6Pdqs2D+lxt4NitdZoQ{ekF&Do($c?Sr3jK|L#!scD9Qk*%Zr?_1p%+R(&)D=fMF&zxxbWbvp!MQ5TbPUo} zGbCnktxPpoC9c)2bu?}|pM=3OXfPy>FROtJY^IHMDet)CQAR|9=(CW7(Pg-?AIE-G z`k+^eB_VHtrX%4*&Hng&er2^=3w3R`V_7im%_B+ARyEJNMYbZ9C@jy0(DwT(r@zEf zDMqk+3i-jNBc|jzbWch@DS5sEHT2nwGfe~-U9I(sYBtIYHIOXsl8r^u=&2I0hJEWB z#ZW2%P;@ZEC!fBIoH?ysM#wrwn!~lX!d)IBz)(6Q!o(HyZfLh~><<<6fjDLxvxyo0 
zqxWv5d8$LT0}IvqDQLymN&0IB(GdE5Mm<6`79fkbV|gAdNzfdR=zn&sCFooHKlh~928-cY6S!^6v9a3Kx?*16~WYT^C_bIW>~ZFWPMyew#{t2P=OI99sAcNs6^!R< z`8+k;P@ypzZu{+;kdrb`mIo^*rL6SjI$j9D9>VVd{4p;tW*(|66)z0lbQsm;!{3hG zbu=Ub)vdym&29&%je=p_JpU#2MfJ9ZWn6u~Q2)*XfojM5fMca=m3leO?3DAIprA&d zZ6HE}dz@{kjYH#&K;=M&3?yVp17yUKK2~{}YT~@P*1(AwNAIqj6+d(_yrhm04#uGc zqid*G5j}f}f>h>mu!2kEPfykh&{E@G4b%MqiK{TB{!L@c8j*uXl>Jz0fGov;t1{Gr zhxbSr?r{{ZrhwNUq|Qe&X~u1_=KXdrmXI#bOl6K>Ihj2Ab&g7upS4sHiv59pKdiys zp5v1+(a{(^L)$JTu!*H}2Z7`<)GCZBCycWnNaS0fR;z}`$-yBj4A`I?$niLB;fEFL zs8T*b=dgGT)Zy>LcVuj_tH1G zPCo2|Hl{m1dUUp|4X;^hSiFa-UTdSAeK3?>w)3WLOIyaEx_fQ1kIvFH><@P1y0i5m zhn5WrbP=m^Pu0%RUI22cKxPj(&8O6yMhIX7_eN@g#y_MpNej2RW|8EFF67_#chJ)> zy_&4DW`S%16$c=mwORO*v9;GJgmac|2WmBWJU%R-9JT1s+ki*868(KeHaQp64x*4W zfGOOhaFq|t9;dOWRgm#iN%1RJZpITvNhVBJjjJVKRRpRu9)+{H2sz2a>cO)E&ML{xQGm zt|ES2uG6e>y`faINAekF+^C!SmQ^e4{vLx48@pJVYo;|kvQy=R zPo_~gA^BGUl@wBYmh^4&^RAfBZrU2VEQeXO9>4Tuh1+A?L7R_UUfs-NlT^J0_?~mG zu)c;PP4LzVQ&+c#`+E&z>@Fd_H*gu_g%GPZ@~PMG&yaW+Nh4a3jSD^`SiE8IhCT zCy155YqK=7dDA8J(-M&)gEa&wu~kM>jJqNZ+Uia`{R6U+j)=EE@4h&AU{|TzrVk4e zu!y6c4^wUPW}KCcAF!myFL7p#=Qo*KX$qwuBV3gJP_ae9#xjCtO)QPxn-VTuC5vlf zs}vmX+gi#q{M8~^rHPQJG+GdUq|%od!uU&qv0X{VSUij1OOo~JlTMP=LW9g*JhVZ6 z>cr(fc(rK~QUW+vFQiCu!iqZ~swlQz$9^cwdV3we+(WbDZMghxjXKKhe>6m9)>sSf zDND%)RPWOzOMRROwzErNVIPT!O_I2aXEyW(HoI;$W=~3ABDW{QLo@B3^D{1Tyh!3~ zIt^#&&+px&dve_3Or@bHc`yLYp8gF0`_mRuw1t$Fb(#B9FL&qv97enM1|*vLJ{!Ae1<;a##>Jcp{W`kCD!xJ0`NT zyR70BAOzoTRnMqBX|kZHRl4i=3dCj6ib-HJRix@mf1&9PCUq`5nAExK^y!Zl{Xs)L z9WmZBpz$91hh7tZwA#d3-Xm2z@wuQ)S3*@({3-tb0sYyhzv$6dwVL8v=|_S#{e}Dp zz-DxXrR{?sY~e@R__-FYY2o8RfbVS6owe~d^urxoMT0d1{Ob_^yN^2x0(`8M|HBaf zSOnI3P!gra9>WjX%CNS_IZx>jeN}+WnbZOC=sVWQFm}-0~57lOzKF)5BjTEb(rgN5}9;!8J%{)NGcd#v~ zc|!~}YPS!y(fztNwdWuX5l#lzUdK03)Ag8{=<@(pLab}Tt3JF+X2trXjgq>~sig|w zq@vU;14Iee<5VKu0~G?en~u6W9#dLI-F4?1xE+COuY51{iiAoj3l z;C#1+@-97&4cr(p?g|Cc@-U%9|C{+7sZPM>^kxDKzp zrY(yMO@<7~yk2Isb1qQshfe-C5edGIm(wXUMo=MSG~gIl=E$CxSqji|I9rb~=CoeW 
zK3LK&`-qeZCo+lxq7x3fb6_>&H1BTboAjAEPHU?kI7V*Yg#f^V!B9u;fDjC+>s)Bu zD##r?s&m0V)!-IOD*ERk0z0#SEE}aCO??g*z88Mhr;$-L39+k`?ZtkmobgtssoE$r zB}-l-oUEtmEL~4^x0BR|<1EzyV|a3pffYW|ENk$VeC2RDzY4*3A=~V`5ZyP)#TBR5 zApkBb8$lm(So||1=u^j#J^AEYa*j9yxGSQ#}yvo7r-n zEpQ;BOpr}8OgGDI-CA?lU8Bqk{e1iNhTz1gY5hpZ+0j4#`tN^UqIl7G{_~H2{vBP( zl8yBCZ?PosM$9cg?Wj-`vCiyn$!$wsT$SZQZp^AJ{fc%3bT7AceRfRpmtbNM5#D|L z^=iH_n>_)zc-#S=B<#_Gmtlc`%ahGyYRZ(2$;o(XdM7DEj{8X_joYsiC@ZZNG-XKH zEdDn{Qy1AL>Dca>?`Xx-6=58&r|St6REZ9Xwq|6%yxc0Tw`CAfNG_%%t~Q0^@?Uc< z;T}KvWy_?d{jl~a%Yr7r@wp&GrGZjDO=l7Ibzw6s9f5n6l5iDQ(ixpCfM^ST4$9w) zRDsXDxXh>wV9wO_QDMEGa?}1UX)P)Uhcu@a5tGg08yx)313_;7wM)Kk8z9Fa0hv5g zN21DGCB_{ZHBbbD00OZ|a*w0dGJDs03^n_o!Hu&N`soJ6U@30pP+|ofMeg86D#dHg zJg6zSxzk4byGb^U#F>2)PLtdJg2=HY>I2>Hr=>~s6Ub*aQJ_@yR{6nvDcAF_`xZ@D zb%a!E5$hxR1?&6Z5&b&Sk$1CX4L1=i6}xEAlFQ*b=6;JcB^|3T3;IuDZiij1R+ST| z>kn-icgP|X7Q+>QG#;FM!IQbOk&ui3sReUC=-&J_0Y3JfkySJ8X#I-;-8 zv@=3$e-eWOBlc7<Fbsi((rhSp5Weycj*u#NWjWZ_MTk3kcJt*L zjI?Wmi*(F9s7rkgxoQEXXD0j&%P*{9(6MGi&sGOA-VF0L8=n3u29R27$bS$ zb6^bkNCpJ6>+`~DX?7M)QkteBr4AcFrw(UCVTiC%H=|{EwG1aF{sabb&#~}!f2)Aj zcCU#c&u*h>tvzK2YFy#Ms2Tn=Vwbj$UOfak*uh(%+gSf9KUlQKy-1O;Y<9?YN%wrU?>e1kS=-AIVG_ao!y-Z_sMS;$gn3U^rWyuaj3En#+i zwft<(c6cOvRE{>vFSKhMqX=8I!%|$8F2$7DNvLi72Fn%3zCq_@#E>wVb1UE{TW~DR zsiI*S-e0LPGf8GFDe*}3ENC5!3?9W9i$of@)qdED%!&4Bgw|-d7K^F~Z`?SmQd=d6 zc4D>xVJSD5hvQLXvGpN9D723Doe^uS!W$F(-q{DO5?J6WqNn$?kD`lB`{)Y50G5mU zh><*2%Np2QrIu*`eGW0;+2z2bPor|CgJlC9^)^Ve_QyunzZ8Tx+^BTw_2Z03mm{RNmvHzA>>%?`=} zv%2f(65y&~eWm#OezXX3h8x7C0xYtfmAF$B;LGgxzwv`&nlqfw!T&6NyejIs%WI86 zA!C*YLqvfojupUvNHFa|s1OF@na{7F+PQ&6t>)pcSJQN2k~syij}t-GH&?PCC1JVz zX>*W#K&L_I#b80l;e1K+!qzFm97r}vMI5PM$b=d6+GYO8P76ZbSTBckt>as^YBz`9 z+##dAEQai%2+sCGBEZMueTMlEg*i_tcA%Ms-7TIQGn+m_(1BEr(MK8Xtxbr+dy3pF z7I?aRb{#5ca!`SyZ|mgx_EWZQ*N1E~1N%|@e`euA+6p}^7RQLXPrfI08i!K^em$_k9@?~PEiT5Z&}+dK|%UVAiI2xX1aC3;(P9ySM)bUZ`~j| z%3Uru+yUTkDWaptjzyA7w||#f9ghfNwoNfu!iXRf@lZTrq=UNbP(CfYsTz4;`gk#z zWRb1IiBAS^oc$E|XpTa4ii4;MplJsWEA5x^*o5ipL`%Mv8e^E6X|Obmx#jC*881={ 
zhCx#2`3!#`VZ$8G?bqL#oIfAF%)uP?&*?`bQ|up28epPp&^Fk9#j{+kCshw%`gg!ZN+j?#$uDpUG#H#LhHFT=@% z29S_gN8x(3fSQJj0RUh%v*Z(4pYiTse9ohnGgiUm=@@5;K7iciRJhzT&(CZEstiq%JpuW=JN|3 zxcxF#k=DX`mG`6CShrz;6?SI9t~YCll-BJ5q`P8n&iC84yQAkXSp|xrwpY1a#hj%L ztV8v8xA=2@akk$HzD2ak+)W+x4A$BkpEpVer`^=VmUV)+FWeId&WZOW9H_r@hYQ0@M4wXx!gMkQ$Bg1=Yzt)piH9)B5H&e z9(FJd5r~TSZM$Ltq)rQ9{_{-qCY`(O&^3bx zBAz+rEb0fer$NC($cgRlH=W-89SqMNPs{S8pq@nO&XUIbajB1f`*)vQpX!KZ4#_uP0d@;h+}Qq2{zRNHH4{f3}S6QCH7MRnNH!z6G8NXGxJzvc`X_o-?HR3lAil2h+2%)VX0MQ-FximkV9XB-R= zL{&jU5^P+myT;)NzsA`)yf}9C!Yl8*tT*wK`1=ze31%jc$wYz#DJ*)XyLhQ0k;wf2 z@Bc2d*%&=Ni{a70k@r>{u)pLgHCoQsAgsd`2VoMv&Ti@zmb56g6W#{b*HknKUzGfL zio@cBe0?Ms!3peFcONKtn}0n1OX&5o&!;*`cd4C`1)m6OBzuzLW1ka`;GB2_=OR9K z_RKjx#Xr&!)!H*f3whz+MeYs5c$v5)9N))?)gVu4_4&X)M6pq0$>|~*!D;WrZKSfN zG5L&jB0FKyH)DUX#sSG=7-!OcuBA!wtNz3I*h5ORUl!Rc=d7g0=E-t<=tQqPNy$-m zrAVZBa!JEjZ~1F`n3c5j(ok|WjgK#YH?8nqv&4m%As}tG@`DNPRcz(f61-a2goRr% zQ)f7FI?FrWb#6v7>R&}?>;Py@e9&3tiNn4ZCmeqE6MwoO@KBy`J{DMtNk&(hiX-gIkb;YYgj-S@NOH+Jk8QL zWy23?PUk96Ka2u|VSo%HUc8bgDvDx3Cg1DXnT(s4hZeN(Z(-yG7H$+?`*95Y0SjGe z|KVzlhxy*042iOvv8GTsuhl;Y+wqBXNz?*b8ykQ902rEZ=r~Mgr}8Gh`F6O>XD*+` z6m8J5RyTGQFQIWFCmenkM&81HLbEky@`7Tibs`->*#*#zXrl{14bk<9_wG>L;{qT; z)_84Z(tLzfT@v^>w_@k17b2+p9`k{8Oyf2QotWDIj)jS-oj#+nJ^L!8Bys|}xX-HI zz0Y}KAH(+S7$xEF89xc;NnwOigL8=rG(t~ z@(j0FVKst#lq>gEL61U3?fAdWkb(L5U;usegw1*r2eLMJX?ElN06+H3`3(S(JJcz| z8L4OGZrMF)=R*ETV)uf+L9tVu@h}TxIc$(Ty09b7>re{~c)>eN4gCtHMYYM5ZD2($ zG2%*A6j!_PEC2%{;$jDDj}lg258Xc$B2%T{gO-IAF9>dq+dmFLVoReKvpRCQM4_0e ze}TNDk#kLA4s|Wd#_EG2({otK zHmb)5&=56n#V%k-y@d*{04|$a0551xlQ=;MT)QERIp63BObI}M1TSjg8?Ic1uOi%{8=gi7XO(`<~q$; zpsdIKxnw>;8UT^tnc2X=(dvbz20s`9dkn4vTwp5q3yx^20=VB3Opl}$aDAIz>j>G> zoL6}k_9=yXh3>@UPrGaFAvQOGibCYnkX2>u+`?Pq z@!w?>uaywG{8`E|0MABcj&H(U(9)*<66KC`x#th4*hPXFvK@YXgOf;9C72h@pk};- zw6UV+yTbP(eX8md7aadKY)rIbk1t3Q=~JOByB-71Qtt8Rf*x<<{Z47%6jzsDjxx0= z^PUtx*viBF6})_%zn0}AsS0(WO0%=(wR`_kmI>dfEX_#4DIBYXC4EZhSUu6DmwUas zW#j^Uh`?~dd#~->*aA0Rh&WlWk#f$ldm#kskHoL_^V))T3nlBTV8l#z;U^ubyAMH> 
zZ%I;N;e%Y_whZtYKr{iT@i}7WWp}CUSxV9ekHPokjx`HM6APZIi<)O{6vN(}-S_;? z2FcXtAZ(C`L?FysJolzdQdjHi41q`I zm^0RhQ|MyNUN4fQOL&8oW%>`qc%wS~F1VRiwkbS0e7~(ktA`YF`_bZS_#L6-72Hx& z?jwQ=C*`4L7NJsvkLDoQEr|wOiU1&w-vw3 z3f`6%(gnYnxRv#$Hdi!^@}oN!!wns4M81<~g>bu&FMvbx#{H{$bT5=ZWz#h!amRXE z@VaBSrM0;8G!qi3MI$|7T+_4U$7Xk$DK$6g+b|tdQ+HOM*PTmA>FcCitMFdxtx}9D%aFX47bw2V7DhrF zesQZ&<@$tpA+xouK8p*Q>Wg$aTsXKLOj+(N;=#J>;lnD%;W-&+-*yf$fKZjr4@4_@DgNi7)0Y^Dj zEdJdHpk1WSN^bP!W2%qJH=UN49_DIeo}>2oU*!10R1p#EUI4*rI1RF)^h$whXZy0j z#CiNT+MUE(6^4BDfU0ofwZJR@y8cw!1PCzMoEdCM9wj$0d69eztBNuG8 zE_l^keS)Gkj$*(~y+VDH1%h~9I-vuAnF`S2A{<$8LSxTY7DDXWuT0? zY;oAXBk>(mmFq+7r~G|z)S7UK4`foTCSVEbvhDfcac$Q}_~Yz?lej_}aTQFx%!_VJ z294?~!p#7;9?xwWA4Te^`1#EB{-Tpa(@IU7j5}sjTb}YDsHM@)2E6q^w(%nZNY^mc z>+gyh_d|kMnJg4Ga__Z#b@?X2j*VUoo`~W7StiRUuas4s*(ARmQga~ROe@t|T^2)tEx!?ao_88Zbr zP@jrk;?fnLrGjJl;;`2iizA*qkhzc%=sF58WeSF=-dJ^a3#9AAJINAVxxF z5J~VY?g=WeobhXlCt`hRt7U4<_U*4OAODT=UCa`Zh43y*ggleHIkjW@o1fEQmQAIo zn4BR67_1D*GLt`X?bT+Hs$wb0%w_Z|f9zoNglK2f(%?nWybk{j_sl4q`(iK1%!O0z z@uBejXx$R-*m-zav3_yGBSvZRR`|jdQ>52K2z<( z@hyUuU!#}doAK5#fd2$E^4`NZU;QE4F6Vxq!vCM5A2o_eA=0WLgKvGXe&VM%sEJb{ zZmR`Z7b5HkKE|HDEFU$@Bl>*x%oNW;^}HFslLp|!SL8dPv4;0L|3kmG6nuf1{#RHI z>+sh$+cReF9O@fgu+g+by3oF@tL1(_4w1izOfv8=a%F@b;FcB`?v_zN@d)DARQgoq zyj4If&Aq1i9Ka?i{FXbOzkhtt;R2jB6u6-MP2q_K3C(X<@|cdEgKZTfGOU{Gr=SnG&;MXmutkzRKnoB&_p6M}ZyAF0D zNc>nqu#sV&<&lQm{D_##IwbNPG#aiv1&fZvYaD?Wt>iEZC}-}0X{M3eI$UVYO}lWw zT?cBkx4+vO_IF{#?&aL(!sjKDy+f&Upw`P&2ZUgiu~WXOFTrh^=w+<;1j9WB$*qCF zEw^<^K9dc?^5cKY4*Kut$BN5%D#)VkP!DbB+15mP|+4PL5UttpTQ=?h3!%b z+KJ%O@RlWLFEgW+v06uhJFfI{*P5m@H?$ZC%D<5cFKwE>Yq`vy0WXZgxu=3?eplhP@csP=uY|h*ZZ?fstQfZ3D&fv*QOu^V z>KkVFs2wYs60i$ubIo8!~lBSp5MP3-Az0ADsgtDdJaa4F7zm;*%!b+pJ zQ%-CIHKP4`x3&)yNSD}{GJa~QAsedd$q+Xe1%qQlQLZ5&C(2UhPK^!Fa9Z^?gxeFy z0P5RjM@A6>T3Hb3s5>}bT-3l3Zi(fdU2h9*E>RU`kYGsn&ccIA)~?`dAcuF9Qyghn z;s&QT3$}6+xq9X!%-?oJgSll4MD!o^a5%CEikR(%;_bd5PoW9MPN=xr%zdQzi5aZF z)e6v*)&b}MTqtb(*HIea-++BVC2<4DR%26SKy>kYIFmAljU4}$$kVd}nFXXKcgHwv 
zV?y?kE`)F)lv(QYfu|BPW}U?q(Vzk1(uy1)P%!(w~) zV&@+&C7VG0h;m{tw7Sv_@6+RN2?JjL{9;M&7A!NY=PoutB>$M_sXjrx>VlB3EGN$R zd*WQWJ{s8_tc^J{>Yan&SSXl+bh5pE+L*~Dm+j^p2Xo(lL=9_L5$?8esnAQZ)KeqZ3KpQfB1*SI zY1G~m*9#;5fXf?XEYI8fo8a*wQ}~K|f5{WqCi`wYacjNr<5i}-p*CTpxu`>=_Y zsI7y8atM&OU5OO}ZHHU*x@;fW%TzRx9gg*S3oNBv2F)9V^I%i2U8uoJXxLFHJaaxX zb}u!?;S9kKG+7rnf~sdL`Ca@xi5(l8rNOGYZH;)<0PS*ND5Y!1fg^y*{l|YNbe{yG2QpL#2m&i@(3cxL{2 zM21E7E6AgR)vx@j34fXT(I6P{KmUoHO@m+DIPvkY^K|4U?p^TLulOe>K{6JG8VT~O z+%!t#L7Iy1y?m6e6aR*hf9*#;xigCcmC1ZK$v>3gYlD%`M0kUVpK0*;r=7W;cYJno z@qBoAc=G)CCxj8^f6|ZiZ5@2$9MGk3USEEA z_wL}CvwZv$8an@vBYMVLiY)l&XO8UEKYqA+`M&Ihy~C5T*X0&B)M-@y?ySVpc$b&2 z-@JQM_J>GVk0qan5rN9y1}l>;2EZ=)m+>Lfc)&Ns3#TEAmnwU!*{vH*T+fGTAtq@r zW^Ux>a?8?pfB)u6{rR4_z^3kk1iD~cR>Xo%sU~?J5H*n~xddI2Q0?{05AXkY@X!Cq zFOiDBt7M^uOpQE1qZqJZyRg@+Vl#^PpfXRve zz)0BjlW=x)jD{fjnqLS0;knyCJ3V@S?H`^!KRzDzpP##jqvwa$L+@gEc+~41xz^SW zBFc@{&PClVV_wC|D}}{}vJJRjVnws8{fypG zIM#s1fZ{xZ2`LII$O9l4h@26uupA`B;gdP2m-Lim}<_gQOH^vXmH&;D9cbs?27$bO9Q&cdbh=TJ>FB zyMfxBySkAhk_Q9DQ)x)?1pvwD_8tfRjes2)cg=|WFsXP>&%c}|X-n7KL!lgsGFam) zjm$5j+P1VL?%H7$Y^U1EJ#u#O$g4`ab|@# zBW~?pmbybb`;0*Rfj_le!5x|1_oap3{@wSjSP0jVd&AO>+2t?A#ki-HwRRIQv9Orj z0qlyg%XBcC#<<5ey4pzLpheOZw;6stbdZ30{I}2RVdg@p00N%9=Fto~g^f!$ZNbdH z37}w>8>ZR2#;0bbL}>-Gf2G6{%k`Zx)E}di(gvAVC76C7O-B?S;EYm;$(hBYgP&ld zu|N_a(>%$De#|NXdpA1RR7QQ|7la75ZV{}`U6&eGYdZtvye5kRg4mk7f5HB!=8*ku z2$EHv?h>X6@V3<3iJ>|we56ptg9a`uf^ECYn)BKkspf^iF$wmYUtXH*xalYY5^N9t zg}s`|1%^Cn|L)tZUEs_sZGiyU+my{t>b=~QTLJM5S%8qW>x;Z4quseSqTj+pw~?E7 zF8y7fn6kc;h6EYTZDauA!9~X2sxh1R?d7ge;s;b8`UeqxZX~(>;<=K!42kQ*pGR?J ztPf_kX7B7U<9+PqDrsbI;Vt#Zk1c=(R$x`jpD55IR~7>V1>Wubn0!k~u5D{oz;V*# z!0Fc}3)nkwmg!<8pQMsF+ps5<>cZDdaaErW`@HoChdn8VzGsR#nn{4uT=8YPHOYj zT3G+?bR~mbCWb`-j1{jf?loq-2iP!m&_<*qN?i3k;6#Z+88dVdV$CX5PtLgkY8GjSQ@6i9dgZ%FH5B`8PfP6?C z0i-s)3j?Z6V<$|V8Eo_7ZykO~ymUZQz_^u1g%uR9{ov|p>vFEuA()oS`}Dh$`IWUr!d#QZpykrGmt z{f))?luUb!PNu{9(-nygP`4Q74i#F9;kFns(%s>iGfBbbV~ZRIE;hyhVAa_TCV52N 
zzzZTc!9b?olgQLppGB-n*|W^8R1CKexP#U0V!AD)0vXXfPXaJ32QJj_Vv(>gKzt&9 z+;quYm<@ePTSz(H9R1DQFd$)ghZgUscbD4Df36a!U1VAAHS=iAFMo7|^@G#7q>Db5S z5220EW)0+}Pi{*qmP*p!&A-zSezer8`7EBs8sDMZOR(BerF1dtqQQH*D)q##Yoo)e zbtki1fcL3QtiJ%&Y2B#=4NWBedx@$2n+9+ z)1c?F#b+NzFk5{4uzz%m57EVt6-Qwfu;M&@{pQV^uO!Ep=T7a)|I4i%t1F$_)ZniE zF7e?vC~9P0L=3#PSKJt$jI~L5+g~OvQJ`gvZ)(BaC?Tvx%&=H^F*`3D@V`NLMm_*q ztAk>)rB80QlppVWYio6!nGvLpYm3OgkkQr4Xu|R{W#<83VUm(W+ zWV@tnd75IE0D0E(Ch=Lj+b;UfpN zF0yqK(g3jP#>1(SDQ1x6Mp1ML^uA73Svx)cM47p>0Evi;WOjQ)FvC-$wgGVm4cUW+ zh7sMq_G$@3XR%)G$>=^8xgXu33(q<+rZMobL(rfy7u_{5@-pMo?8C^bozm-1j}N!x z|ACX?`!Irb5_wjP4Cp-l^!-n=nElN(&;1}vwAXi(%3~PDFR5vbBAScQ`<^0-&C;LgJjKQF69A_yVJI| zVvsaLG*RYQ8BA?jO!dln2$h56^OjKNEMZJrWHQL1i3CF%$_rplKJF}*VUottRA**@ z_t_pyh4+WNd&k187$Xu9MwV#PIraTy6W8Vl10h%=mEOc}Nw zBFKS2L7(A)g`GGDnQH_Fx1RwQII%ko5}NqN5~9G%{MG#1ugdLmV2be-rq&9G6SF<1 zJXBt{fVg5f+Y82}nAXKZvp36K>%r%k?nVxL9`{YdQ;}nmG1G`}g31`m*#m0?lU~PC zi|I?vGT6)3-a2kOyiHV%sVeY^wPhkDui-ZdRLm@Qdkn4pB99vpEMoq)mRKa;o)SNa zTv%NF#a>r)Vs_4zM|*B%Q>qz5&$0Gx%MdrA4$00cd+XxZ@UW^qLiqv@tBtIWkeMMS z0ZsL@`exgOP4BS21BlJaCik9YrOkCPB%{EoOGpEC810z=uV;d}JYF*Tt;KTI3|T*| zuV-#yIlHBv=^Bip!Ds+pHT<|eiy7@^AbHoS=(yM}Nzt3w_&3xh8Zos z+n03qrR^=Q33FfxL{A+M$Q~bVLL`_%50nW(7-yj1=Is1#oc!K7fl(qv9nghbVO#q- z4FfD|XiDbx&-?<-;hy^wf_IzHtuhdc`c!w-SR*^TKH_CIdA=xK@x13fCB_ z!NeX!Vn&DW^=hltt!@T2FY*42p54){A6v|hG1NJgqqN#E{9K?|doHMYqm8=v%PA$$ zkEd}=5g-FE4WdC3;J@@sL+nub%PmUKdq-xQ|4D5s%BRQQ5~j|6p-3IoO_kXZ0Cl?= ztL~JcF@r20kH}5o%#Rpt+SX~)H^X6iozo=g#Yz+`Drc{JL<`Dk;!i1~g>@A3P0?B* zVQ3<{FJ9}7R1jL@U%S18e6@t*`#8C$0L&78lAkb56j+hg7L?&;u{V{+CRUWdb>rw} z)^~|7{h~HZ=ulnH^7|NSIW!KoZr*ZSyAm=yu8%P7XV;)~?+l~S+!lf6tu`%xPRtO3 zqkTEqn+_+wdi)Ky-U}f3Zk|zI*6~)H^sgX5!YGx-4>0NpKsEMcMnTzKK0sh);Dr{& zYhU{P{*u1XEMVRal~na^I(baY%7d~=uW6vp5=Le2&2S?pEz5N6on=s5OSiCRa0u>_ z0D+*vH9+v-FnDlxA7*fOmk@#lhX5hLHFyX?g1fsD+#T-ZJ#T&IB!}PMpL=^%?U~-Y z*VC(}*P33{)zy1(Kuk%z;mWQNr4zO>iCd*5TxR{P)6!w`HLR=N``3U`T} z7P^qUNIIUiGnh~_=guh`L>E8F_vWN;)*y>m*w}RyeNO#E_`>i@5m+wao%9i%wt#kD 
z5KGf^K!5*=N_=Q`VLNGz_@J}TVbSh_Iam6;k$0h-fYLij9&n@8a$7J-AhyfGz;UdU z;sG`G#^M!IZpydV!G^pFesSqbn?^S`E);6Qkjh=5?vOCE*rZku(P8x z<#i<`PSYR-O(=Q_Z%{(&jl)Y)B4A3)9w5Q6_(G{$>W&GcD{w9ad*me=%B-17S^t2Q zU+DhgQ7qBYcL-);aN03%KPxdJr3_`IpkS73Bh4}DV~2{x)?uhEDQL;J{)=zXu5(D- z&YZqRATR*0ht9Eh*UFx+tpykV<#bX>)!`4Nv1fEG`D=J{->lwgO$=($%e(a`W@{|1 z_^Mzcbf5%J=r~cbJ1y@$si9jnubY{u7;^Y9R9FPwMtP~nin<4vCd>2@SkB$Q(!#$F z&mj{~cg8I?0rz#Je>;g{{$4pCpI9n@IFHm-hBB4$t#f?zqidBkDpb^DGR=Y7R{6=) z#kxmiuClWMd9O1R1MkDyxFi7+jnXXRT?^!z;%n&6I8PWSD)M`s8nyyLH5Uv=sCujw zOB&|zIZHW;V$~{4iwNffIm|GMxcSrTYx0e22gR*qzD6s>t-X`$A9Vx>CqS>YH5D28*++MFqsc1?LT@|?pJOh7BM9zU zo>KvHhX#>JoZ0|~o zJXTK6XnjhmQvoGa{j}JsghJB}pbdvP_&s{=vMe^mc#}{oJ}6ml;plex5$~;Xx2u#^ zmOHT~mlA#K`%;!geyJGQPkqkBMh-Mpp_^^Bt$lU@eOeqFXNDQ#-Q76+S5Gzd2tzT} zI=+ux?>D2<@$Tl7Rc%yMw`HU_`os$Gi?{dH>D@EYUL4VDWSDcR2JFp!)$iRXC&8Xr zNb+o^;pjU|uGT51kPr%Z9&*OzLu78LyHRDi85vTjpf8BL3JK+$BX@H#ALrZetDcqq zR{KI!M|;-&&6atF>6w$JBzi2!U&qm~G@_6rlqBnl7 zUXwRn`O1F2OTX-Fy6*k9Q0q&xO3#$d%Ego)rGztVUYGL46?ry_&J)Fne7liFu#*66 zZ8Z*QYcR9>Bbm>6sCQ^Gf^kq>Rp~3B0x{E<$sVtsgip=;odsqzJ@U2Tm|em+aR$zQkG zAyZoq6j#_kGK})g&HHj`u7M->qJO^=vZpc?P_8(*w_9%z2-#Ji=HkkUor@qXEHf>E z_l)aI*YL528mX2bsTz0^a@0e(RWr_%3pDzoKrZ@a$y01Pc)n>QF$w%u#&%XYgGyHi zdnx6npVP{$M!?szwjzzZoCWz$;W290Q^6=(F_sm^jfJUm4Y&P(isgBN80RIPTXv#L zxVu>R7nZHLaMK10hZ(D4hF|tA?zG7fnFUrB&3_7jdm+th{_|!mxC-x!bdb;Lzd7Iq zXO@YHbi1Ua5Bmj>LZN5$vX6ph9j%!^oXSbv&U|a5BlB?uKgk>1A9-W3v+zL#uUM-; z`SLc=0hn&k04+|=hdZ!0=wVdIo>|AEfN`$4#de+W{Q433!C30irW&1BS8}gF1D2^n{m-tsibrC$l zw7yA5Lvrov+TmMbx_s};1f2-?VqK-EGm6{#*pjtSY4JlX!$~K-U?ThIN@&vt8UVc| zZaivn4FxVYeEjKy1oE+6KPr)#?J8X(`**^DZG#4T!Nkw?~v{kM>XMG*W1Mj#4X)%jm;KDn)Eer*H0mhrRZLWKlyS0XsiPWcmkOVqB zLZyUWQ1fp(CrgfG{oEawhSRxRLHGz>mJ}|qWRPoYl7G}-qDov?38~qfs8{)dRWA9I zx15Okpgmql-@CCwDXQERXJ0O;=gp;$m#_l1Uu#e>q%6$v*#_cUfZ?423AgFO6^m!J z#OJ1%<|%|$i{%2aefE()`f&+0|bVVq`^Itc%JxA)njf&_grB z4>(`#Cuydly_;cIuhqOB*5J*7y|4@ir^d=*Ty& zleW124yYAV@2^h##(U?Jt#~26NvHFiyrlIqVtVhY3yV{%6p5@J4d=YZ1xb9^MRzmr 
zfbB!ZII*fXC~eycYpDT*zl`XVjR)fzgDey~96J1;^Lk9P!`YjF6nf;;$7@BEm1%YE zPcFc6^jL-$`{G%F)CiwBZwB12wyraq-x3@E=_y~0naArGUH0~|5!WUbbKrVBQ?}+7 zUYUKPR41M=Pim+s(W4)})}4ByCEd)l^Af87wtD9l12@f&6oVnx;>|Fk(;Amy^g@gw zG_18QaO#sYl~-K~#Mz*URlea&*OY%#hQnz4tch*4%SF$H+qmL!4T=H}vEo519hRQc z>n>`$ukd*z{5*hQ>%PY4pDOcqSMc;ABbQa`f}?Q9lDOO3i0oYkBbWstD{N)S`qpj> z=llzepX9X>EzvSxejKAl(MDnXJUf_+e@F`qF{go6JUuzQ$=b30S#ao2nG~xRD=UN* z8ZOw|P)#IQ~i;eMH*TeZ?ZLV}5QKq-%YX0DvgcV>g?2>gm-xH+(r^r zQU-nQMPp8#i3TTZQ|f*2Mu^q&TnNKvOlp`ZTYf!Baey};Iw?t&og+cn2#^dFz3RI^ zXlF<SFk~ zvMiV=xv9ZWFa~bm$%Fu%ZdQn^C>#=v+oT9){yVpOp)9Ztattv7V}QwiBkY{bTMC*s zp~UYY${E|Y?b9k8J46D3d!wwW%l7SdBsME7U6Vq!=e(^eZED>c+{Gd+y`;mD-r=UR zH$v1kRP?IH$_Mmc(S?bPeJeug$SUeQ^h(QpN}uY_tol6W$jBh7J11sBy~yz-TLk-c z2$36xDM=t)eDV5p%;mcmz-?H}&dh66*l))2L}QPbd?@0u&5T$A*vOw&zO9Srn)(yf z-u;E*bwAcOGeyIBfrdOyB^y9OHmW>t7*DrhDNw^t?d$@UFd!v4Ms;%wD9VI7)IO#KU${vSStmrT3R>Xd2J)V$^b%bo#Nc-qtH|)ZljL zM_*1#CA05OSgs5^xIhheNA#}1_1#kc8@jU!K8I1#D7y~seb@Fi{`wMr?3#Sk$nvjU zm+1m2x3Kl3FW5&(u5ZpHk;y*db4|3*v`Yp75*v~aAH6T7f~ zo%vYle8AfKcAX{6o|xgzp(_?0(~RCN^GhYR0{BHkdyh(}F&ZWs0`pd8@#IOjyO(DD z(_3IoOx5`;Twdd@cU(4Q7o3ofbp+~`prJadkQ4uW9G!X`}DW$>Qx_$D{yNy zm-^w~S!dVRbX%P`96%ngYU>URl)kCvwjeIjh`HHv*@-T$lVQsX*4@*nR@gSneyP*c z?CL~qe>*)cq-bZLbLJ!YS~`{dCx&<|!4xXWB4K(lI;t0urxOiX>$EH5Qkk!1{!jW^ zj@fa2J0D@#%B3c4x}SLz<6C~)np=|%X=;wDlj1eHQ!ET*XU{6%D(X|yxY)9ft4Qx+ zMXYGgrcpS1o1SlR(cD~xTut51zJD}3=A5^KKE~aLzWcF?8Y=;5^yn5s_?m{vM>(}| zY*^AZ{3)fukqq0Dyzll0h038eTB`$8Dzn(#cqSl;S{5J3`Nd!|yS|tkR=7aCzd)*y z3nmnTkbo-f@j` OqHMszo8TbA;{VNRXAFxQuIQW7TL04ux;eO^yMRISyo1i`RnQ zmux-6^9j0tzzfx+BAIa18FGW!Q%NgT1F;ph;DPH<^OLkQUVLb8MG6}slz!4)GOn@YYVd%?+Z zGbW}_QW2?K$y!-eh50W!_%y|xOR&h-t_2qiR6cZYDCe%qtXqiH;>)t*4Y6@L zn1Y%PmRyV`>+dPVhfp=eEX_`n53@wcCDo+MLqj264xI+K-hQV8NyIPm$%X-C`QIR zAx1pnAaHkzbaN{qDI|T$(Cdj@c9&XEHc5JShE5Gp{3-5iK!4XVx0`OaZA@!k%B*ZTy?vN=;sY}6sn7ueH_;O|WmWE&{JyT(dL+ii}9PI~)Y|kbc z^i}aincF45cf!7$5E>puGc#{%H&TfZWTBi^KRJ|mOy_%H0002)@8JPTa_|WF03-kk 
zto{_xU0y#i0S5pC;R68Jusd$HtR_abCgPSxR`x7zcDA;Y3icx+fRH6YT-UJ1g2N5p z+L)ggayYQKAQWO$5o6z3I)q|d$$cUeIB)!ReHjGkv(X~R_e9`8ym4b zYgU!%XWJuvOYZMk6`Ltyu1_zIHVz%#J%m8pp(8Hu`>*FJ7<=sNT9}k(=j;hP)erN2 z&KMt7F$JAuh)kMcs`P5a}$5a^9 zRi2J1lc+1>j47e2D*?w8FVz$eM?qU^pruiTDK&+mQTZQg@~xwCb!u|uqq2o+ve~0D zDUA^7Q7dZN?S&g?9S!iJ`XS4~1K+!Bx z8O-CiXp*#fFPw=ISr@SOec&Gof_IJ+7B7GRfHW)+Z~!PkQwI|zhyxhPYT^Jf`xOP0 zIS4@dEC8&@|L*cGPEHP<9Y6Ry;NGaqhxXV~;+s6$>-vf;P376()fx;C`Ln{2J5SCL z{_#%9F4{C`OPZ(3z}Y8-`Brx;r@#m%DO5}QpwUz$!D7NKZdorw?Xq!8%R%7F_d`9A z?{&UK2YOmCeW7fni~AtBR3kcr&D1)1%uK&txjCq;OIDy6rcv9Pc*t7=)Lp+Mc#WNG z=)XjsJRKJXl5UmyFjwaSK#9plMC5nVHATETLIQW+^RdxefN0uUJJLap15*JM?L$rH z5H1(JBUGwGo})cSP40yH{RsFRZ(-(RBa+X$a@f4cS7nh_d7yRk38HoDn;F$d zLLZ*Z0v}Pny?!s?-OUIL{y%T~JHpc92ki0Ku{-vq8w1xoD}2##A{t9% z#mC^oq-8uQ8Y{H|O-gO|8JD8nF!iIRY*Y8da6Z-^_HGOa$966smGKsdQ&d$V4y-d+ zSSCELo+G%i_vG~K)Cx0{W9v6=jU~Rw^SYHSYO)!WiuR8S#y~7Ju}zf0Rg3mSfimiR zQtRJ;Iy}6OtD&yF)LrNj%R}q_o(1>it16nr%5~c=E7t$rnm{3#Jz<^{gu zlH0(Gafj9AXIX}fI~YimjPlC0{KH0>pSdqC&@YYUeT<2}Ag7%>F}%LJ5p^J+iju&B zy=%9kR*NS@XeIX4m#FBVb^rOKeg6?x%LP`svmR^Ty*rq482Rw{-3%#wRWn8r{K&AJ z)2cum=;~4jS=joTd6rwJHXtlEo!ssvTXscO71AkX&@;sawL92f>R(w05RMt}ciz$deTyal76V_90RX(ehy1_abi^I( zoy_c=4Ak7gW>CFfL7^n~JD^&fjrs)ETprfJqu&4xu&^?KnAt*ES$7m@yx|F}9pY>XWoY#!>>0$nGM0cJK3)|B)&Wl5M3u;l5lj{k{x&TL{uQapal63N|^M66wZezB~(c{d{{t0bj6>PHf6YnY~gx{@-qp#`pO&t zVEF;}hb$N_T{KQ;gWHmd(9+?@XbK%Lxe&7cqU?OdA3hP(&>{D?(@1{`i5E>LCj25ogh~B77r5sOzi(=^pGI_BK(%-{~08|XGDMN0sw=L5dr_okN#Bu keTMs|x-i8*)c-r*Daj$iex+Y?AvWMKEHXu@5Mj0d0hRxs!T Date: Wed, 10 Apr 2024 18:25:02 +0200 Subject: [PATCH 012/147] ajout filtres donut 2 onglet data --- dashboards/app/pages/data.py | 95 ++++++++++++++++++++++++++++-------- 1 file changed, 76 insertions(+), 19 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index dd6eda4..3bee473 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -27,6 +27,15 @@ # Définition d'une fonction pour charger les données du nombre de 
déchets +@st.cache_data +def load_df_dict_corr_dechet_materiau(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" + "chet_groupe_materiau.csv" + ) + + @st.cache_data def load_df_nb_dechet(): return pd.read_csv( @@ -49,6 +58,7 @@ def load_df_other(): # Appel des fonctions pour charger les données df_nb_dechet = load_df_nb_dechet() df_other = load_df_other() +df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) @@ -133,14 +143,18 @@ def load_df_other(): # Charte graphique MERTERRE : colors_map = { - "Plastique": "#48BEF0", - "Caoutchouc": "#364E74", - "Bois": "#673C11", "Textile": "#C384B1", "Papier": "#CAA674", "Metal": "#A0A0A0", "Verre": "#3DCE89", "Autre": "#F3B900", + "Plastique": "#48BEF0", + "Caoutchouc": "#364E74", + "Bois": "#673C11", + "Papier/Carton": "#CAA674", + "Métal": "#A0A0A0", + "Verre/Céramique": "#3DCE89", + "Autre": "#F3B900", } # Ligne 0 : Filtres géographiques @@ -267,17 +281,57 @@ def load_df_other(): # Étape 1: Création des filtres selected_annee = st.selectbox( - "Choisir une année:", options=df_other_filtre["ANNEE"].unique() + "Choisir une année:", + options=["Aucune sélection"] + list(df_other_filtre["ANNEE"].unique()), ) - # selected_type_milieu = st.selectbox('Choisir un type de milieu:', options=df_other_filtre['TYPE_MILIEU'].unique()) - # selected_type_lieu = st.selectbox('Choisir un type de lieu:', options=df_other_filtre['TYPE_LIEU'].unique()) - - # Étape 2: Filtrage du DataFrame - df_filtered = df_other_filtre[ - (df_other_filtre["ANNEE"] == selected_annee) - # & (df_other_filtre['TYPE_MILIEU'] == selected_type_milieu) - # & (df_other_filtre['TYPE_LIEU'] == selected_type_lieu) - ] + if selected_annee != "Aucune sélection": + filtered_data_milieu = 
df_other_filtre[ + df_other_filtre["ANNEE"] == selected_annee + ] + else: + filtered_data_milieu = df_other_filtre + + selected_type_milieu = st.selectbox( + "Choisir un type de milieu:", + options=["Aucune sélection"] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + ) + + if selected_type_milieu != "Aucune sélection": + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu + ] + else: + filtered_data_lieu = filtered_data_milieu + + selected_type_lieu = st.selectbox( + "Choisir un type de lieu:", + options=["Aucune sélection"] + list(filtered_data_lieu["TYPE_LIEU"].unique()), + ) + + if ( + selected_annee == "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + ): + df_filtered = df_other_filtre + elif ( + selected_type_milieu == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + ): + df_filtered = df_other_filtre[df_other_filtre["ANNEE"] == selected_annee] + elif selected_type_lieu == "Aucune sélection": + df_filtered = df_other_filtre[ + (df_other_filtre["ANNEE"] == selected_annee) + & (df_other_filtre["TYPE_MILIEU"] == selected_type_milieu) + ] + else: + df_filtered = df_other_filtre[ + (df_other_filtre["ANNEE"] == selected_annee) + & (df_other_filtre["TYPE_MILIEU"] == selected_type_milieu) + & (df_other_filtre["TYPE_LIEU"] == selected_type_lieu) + ] + # Étape 3: Preparation dataframe pour graphe # Copie des données pour transfo df_volume2 = df_filtered.copy() @@ -375,6 +429,10 @@ def load_df_other(): ) # recuperation de ces 10 dechets dans une liste pour filtration bubble map noms_top10_dechets = df_top10_dechets.index.tolist() + # Preparation des datas pour l'onglet 3# ajout de la colonne materiau + df_top10_dechets = df_top10_dechets.merge( + df_dict_corr_dechet_materiau, on="categorie", how="left" + ) # Preparation de la figure barplot df_top10_dechets.reset_index(inplace=True) # Création du graphique en barres avec 
Plotly Express @@ -384,6 +442,9 @@ def load_df_other(): y="nb_dechet", labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, title="Top 10 dechets ramassés", + color="Materiau", + color_discrete_map=colors_map, + category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, ) fig.update_layout(yaxis_type="log") # Amélioration du visuel du graphique @@ -398,6 +459,8 @@ def load_df_other(): uniformtext_mode="hide", xaxis_tickangle=90, ) + # Suppression de la colonne categorie + del df_top10_dechets["Materiau"] # st.markdown( # """## Quels sont les types de déchets les plus présents sur votre territoire ? @@ -602,9 +665,3 @@ def load_df_other(): width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" ) st.plotly_chart(fig_marque, use_container_width=False) - - -# st.markdown( -# """## Quels sont les secteurs, filières et marques les plus représentés ? -# """ -# ) From 9a93107cdccacc461b9f3051b79e04149d109087 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Wed, 10 Apr 2024 20:21:10 +0200 Subject: [PATCH 013/147] ajout filtres onglet data sous onglet secteurs marques --- dashboards/app/pages/data.py | 61 +++++++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 3bee473..274ef55 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -563,8 +563,67 @@ def load_df_other(): df_dechet_copy = df_nb_dechet.copy() df_filtre_copy = df_other_filtre.copy() + + # Étape 1: Création des filtres + selected_annee_onglet_3 = st.selectbox( + "Choisir une année:", + options=["Aucune sélection"] + list(df_other_filtre["ANNEE"].unique()), + key="année_select", + ) + if selected_annee_onglet_3 != "Aucune sélection": + filtered_data_milieu = df_other_filtre[ + df_other_filtre["ANNEE"] == selected_annee_onglet_3 + ] + else: + filtered_data_milieu = df_other_filtre + + selected_type_milieu_onglet_3 = st.selectbox( + "Choisir un type de 
milieu:", + options=["Aucune sélection"] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + key="type_milieu_select", + ) + + if selected_type_milieu_onglet_3 != "Aucune sélection": + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + ] + else: + filtered_data_lieu = filtered_data_milieu + + selected_type_lieu_onglet_3 = st.selectbox( + "Choisir un type de lieu:", + options=["Aucune sélection"] + list(filtered_data_lieu["TYPE_LIEU"].unique()), + key="type_lieu_select", + ) + + if ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other_filtre + elif ( + selected_type_milieu_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other_filtre[ + df_other_filtre["ANNEE"] == selected_annee_onglet_3 + ] + elif selected_type_lieu_onglet_3 == "Aucune sélection": + df_filtered = df_other_filtre[ + (df_other_filtre["ANNEE"] == selected_annee_onglet_3) + & (df_other_filtre["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ] + else: + df_filtered = df_other_filtre[ + (df_other_filtre["ANNEE"] == selected_annee_onglet_3) + & (df_other_filtre["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + & (df_other_filtre["TYPE_LIEU"] == selected_type_lieu_onglet_3) + ] + # Filtration des données pour nb_dechets - df_init = pd.merge(df_dechet_copy, df_filtre_copy, on="ID_RELEVE", how="inner") + df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") # Data pour le plot secteur secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] From 872fba3ff76661c9900faefe6530f522813e6bdb Mon Sep 17 00:00:00 2001 From: DridrM Date: Wed, 10 Apr 2024 20:32:36 +0200 Subject: [PATCH 014/147] Create a hotspot functions module and update the requirements.txt --- dashboards/app/pages/hotspots.py | 10 +- 
.../app/pages/hotspots_functions/__init__.py | 0 .../app/pages/hotspots_functions/maps.py | 94 ++++++ .../app/pages/hotspots_functions/utils.py | 25 ++ dashboards/app/requirements.txt | 2 + poetry.lock | 272 +++++++++++++++++- pyproject.toml | 2 + 7 files changed, 399 insertions(+), 6 deletions(-) create mode 100644 dashboards/app/pages/hotspots_functions/__init__.py create mode 100644 dashboards/app/pages/hotspots_functions/maps.py create mode 100644 dashboards/app/pages/hotspots_functions/utils.py diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index adf133d..02f4fb2 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -3,6 +3,8 @@ import pandas as pd import duckdb +from hotspots_functions.maps import plot_adopted_waste_spots + st.markdown( """# 🔥 Hotspots *Quelles sont les zones les plus impactées ?* @@ -36,14 +38,12 @@ ) ).to_df() -# st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") - st.altair_chart( alt.Chart(res_aggCategory_filGroup) .mark_bar() .encode( - x=alt.X("categorie", sort=None, title=""), - y=alt.Y("total_dechet", title="Total de déchet"), + x=alt.X("categorie", sort = None, title = ""), + y=alt.Y("total_dechet", title = "Total de déchet"), ), - use_container_width=True, + use_container_width = True, ) diff --git a/dashboards/app/pages/hotspots_functions/__init__.py b/dashboards/app/pages/hotspots_functions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dashboards/app/pages/hotspots_functions/maps.py b/dashboards/app/pages/hotspots_functions/maps.py new file mode 100644 index 0000000..e3a5e35 --- /dev/null +++ b/dashboards/app/pages/hotspots_functions/maps.py @@ -0,0 +1,94 @@ +import pandas as pd +import geopandas as gpd +import folium +from folium.plugins import MarkerCluster + +from utils import construct_query_string + +def plot_adopted_waste_spots(data_zds: pd.DataFrame, + filter_dict: dict, + region_geojson_path: str, + ) -> 
folium.Map: + """Show a folium innteractive map of adopted spots within a selected region, + filtered by environments of deposit. + Arguments: + - data_zds: The waste dataframe + - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by + """ + #################################### + # 1/ Create the waste geodataframe # + #################################### + + # Create a GeoDataFrame for waste points + gdf = gpd.GeoDataFrame( + data_zds, + geometry = gpd.points_from_xy(data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"]), + crs = "EPSG:4326" + ) + + # Construct the query string + query_string = construct_query_string(**filter_dict) + + # Filter the geodataframe by region and by environment + gdf_filtered = gdf.query(query_string) + + ###################################### + # 2/ Create the regions geodataframe # + ###################################### + + # Unpack the region name + region = filter_dict["REGION"] + + # Load France regions from a GeoJSON file + regions = gpd.read_file(region_geojson_path) + regions = regions.loc[regions["nom"] == region, :] + + # Filter the region geodataframe for the specified region + selected_region = regions[regions["nom"].str.lower() == region.lower()] + if selected_region.empty: + raise KeyError(f"Region '{region}' not found.") + + ############################ + # 3/ Initialize folium map # + ############################ + + # Initialize a folium map, centered around the mean location of the waste points + map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] + m = folium.Map(location = map_center, zoom_start = 5) # Adjust zoom_start as needed for the best initial view + + ###################### + # 4/ Add the markers # + ###################### + + # Use MarkerCluster to manage markers if dealing with a large number of points + marker_cluster = MarkerCluster().add_to(m) + + # Add each waste point as a marker on the folium map + for _, row in 
gdf_filtered.iterrows(): + # Define the marker color: green for adopted spots, red for others + marker_color = 'darkgreen' if row['SPOT_A1S'] else 'red' + # Define the icon: check-circle for adopted, info-sign for others + icon_type = 'check-circle' if row['SPOT_A1S'] else 'info-sign' + + folium.Marker( + location = [row.geometry.y, row.geometry.x], + popup = f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", + icon = folium.Icon(color = marker_color, icon = icon_type, prefix = 'fa') + ).add_to(marker_cluster) + + ############################## + # 5/ Add the region boundary # + ############################## + + # Add the region boundary to the map for context + folium.GeoJson( + selected_region, + name = "Region Boundary", + style_function = lambda feature: { + 'weight': 2, + 'fillOpacity': 0.1, + } + ).add_to(m) + + # Return the map + return m diff --git a/dashboards/app/pages/hotspots_functions/utils.py b/dashboards/app/pages/hotspots_functions/utils.py new file mode 100644 index 0000000..b82de7a --- /dev/null +++ b/dashboards/app/pages/hotspots_functions/utils.py @@ -0,0 +1,25 @@ +# Imports here + + +def construct_query_string(bound_word = " and ", + **params + ) -> str: + """Construct a query string in the right format for the pandas 'query' + function. The different params are bounded together in the query string with the + bound word given by default. 
If one of the params is 'None', it is not + included in the final query string.""" + + # Instanciate query string + query_string = "" + + # Iterate over the params to construct the query string + for param_key, param in params.items(): + # Construct the param sub string if the param is not 'None' + if param: + query_sub_string = f"{param_key} == '{param}'" + + # Add to the query string + query_string += f"{query_sub_string}{bound_word}" + + # Strip any remaining " and " at the end of the query string + return query_string.strip(bound_word) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 28dbd01..a69bed9 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -1,3 +1,5 @@ pandas==2.0.3 +geopandas==0.14.3 +folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 diff --git a/poetry.lock b/poetry.lock index 9bcff55..ab76f30 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,25 @@ # This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. 
+[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + [[package]] name = "blinker" version = "1.7.0" @@ -12,6 +32,21 @@ files = [ {file = "blinker-1.7.0.tar.gz", hash = "sha256:e6820ff6fa4e4d1d8e2747c2283749c3f547e4fee112b98555cdcdae32996182"}, ] +[[package]] +name = "branca" +version = "0.7.1" +description = "Generate complex HTML+JS pages with Python" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "branca-0.7.1-py3-none-any.whl", hash = "sha256:70515944ed2d1ed2784c552508df58037ca19402a8a1069d57f9113e3e012f51"}, + {file = "branca-0.7.1.tar.gz", hash = "sha256:e6b6f37a37bc0abffd960c68c045a7fe025d628eff87fedf6ab6ca814812110c"}, +] + +[package.dependencies] +jinja2 = ">=3" + [[package]] name = "cachetools" version = "5.3.2" @@ -175,6 +210,42 @@ files = [ [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} +[[package]] +name = "click-plugins" +version = "1.1.1" +description = "An extension module for click to enable registering CLI commands via setuptools entry-points." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b"}, + {file = "click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"}, +] + +[package.dependencies] +click = ">=4.0" + +[package.extras] +dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] + +[[package]] +name = "cligj" +version = "0.7.2" +description = "Click params for commmand line interfaces to GeoJSON" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4" +files = [ + {file = "cligj-0.7.2-py3-none-any.whl", hash = "sha256:c1ca117dbce1fe20a5809dc96f01e1c2840f6dcc939b3ddbb1111bf330ba82df"}, + {file = "cligj-0.7.2.tar.gz", hash = "sha256:a4bc13d623356b373c2c27c53dbd9c68cae5d526270bfa71f6c6fa69669c6b27"}, +] + +[package.dependencies] +click = ">=4.0" + +[package.extras] +test = ["pytest-cov"] + [[package]] name = "colorama" version = "0.4.6" @@ -358,6 +429,54 @@ docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1 testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] +[[package]] +name = "fiona" +version = "1.9.6" +description = "Fiona reads and writes spatial data files" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "fiona-1.9.6-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:63e528b5ea3d8b1038d788e7c65117835c787ba7fdc94b1b42f09c2cbc0aaff2"}, + {file = "fiona-1.9.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:918bd27d8625416672e834593970f96dff63215108f81efb876fe5c0bc58a3b4"}, + {file = "fiona-1.9.6-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:e313210b30d09ed8f829bf625599e248dadd78622728030221f6526580ff26c5"}, + {file = 
"fiona-1.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:89095c2d542325ee45894b8837e8048cdbb2f22274934e1be3b673ca628010d7"}, + {file = "fiona-1.9.6-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:98cea6f435843b2119731c6b0470e5b7386aa16b6aa7edabbf1ed93aefe029c3"}, + {file = "fiona-1.9.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f4230eccbd896a79d1ebfa551d84bf90f512f7bcbe1ca61e3f82231321f1a532"}, + {file = "fiona-1.9.6-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:48b6218224e96de5e36b5eb259f37160092260e5de0dcd82ca200b1887aa9884"}, + {file = "fiona-1.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:c1dd5fbc29b7303bb87eb683455e8451e1a53bb8faf20ef97fdcd843c9e4a7f6"}, + {file = "fiona-1.9.6-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:42d8a0e5570948d3821c493b6141866d9a4d7a64edad2be4ecbb89f81904baac"}, + {file = "fiona-1.9.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39819fb8f5ec6d9971cb01b912b4431615a3d3f50c83798565d8ce41917930db"}, + {file = "fiona-1.9.6-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:9b53034efdf93ada9295b081e6a8280af7c75496a20df82d4c2ca46d65b85905"}, + {file = "fiona-1.9.6-cp312-cp312-win_amd64.whl", hash = "sha256:1dcd6eca7524535baf2a39d7981b4a46d33ae28c313934a7c3eae62eecf9dfa5"}, + {file = "fiona-1.9.6-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:e5404ed08c711489abcb3a50a184816825b8af06eb73ad2a99e18b8e7b47c96a"}, + {file = "fiona-1.9.6-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:53bedd2989e255df1bf3378ae9c06d6d241ec273c280c544bb44ffffebb97fb0"}, + {file = "fiona-1.9.6-cp37-cp37m-win_amd64.whl", hash = "sha256:77653a08564a44e634c44cd74a068d2f55d1d4029edd16d1c8aadcc4d8cc1d2c"}, + {file = "fiona-1.9.6-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:e7617563b36d2be99f048f0d0054b4d765f4aae454398f88f19de9c2c324b7f8"}, + {file = "fiona-1.9.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:50037c3b7a5f6f434b562b5b1a5b664f1caa7a4383b00af23cdb59bfc6ba852c"}, + {file = 
"fiona-1.9.6-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:bf51846ad602757bf27876f458c5c9f14b09421fac612f64273cc4e3fcabc441"}, + {file = "fiona-1.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:11af1afc1255642a7787fe112c29d01f968f1053e4d4700fc6f3bb879c1622e0"}, + {file = "fiona-1.9.6-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:52e8fec650b72fc5253d8f86b63859acc687182281c29bfacd3930496cf982d1"}, + {file = "fiona-1.9.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9b92aa1badb2773e7cac19bef3064d73e9d80c67c42f0928db2520a04be6f2f"}, + {file = "fiona-1.9.6-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:0eaffbf3bfae9960484c0c08ea461b0c40e111497f04e9475ebf15ac7a22d9dc"}, + {file = "fiona-1.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:f1b49d51a744874608b689f029766aa1e078dd72e94b44cf8eeef6d7bd2e9051"}, + {file = "fiona-1.9.6.tar.gz", hash = "sha256:791b3494f8b218c06ea56f892bd6ba893dfa23525347761d066fb7738acda3b1"}, +] + +[package.dependencies] +attrs = ">=19.2.0" +certifi = "*" +click = ">=8.0,<9.0" +click-plugins = ">=1.0" +cligj = ">=0.5" +six = "*" + +[package.extras] +all = ["fiona[calc,s3,test]"] +calc = ["shapely"] +s3 = ["boto3 (>=1.3.1)"] +test = ["fiona[s3]", "pytest (>=7)", "pytest-cov", "pytz"] + [[package]] name = "flask" version = "3.0.2" @@ -381,6 +500,47 @@ Werkzeug = ">=3.0.0" async = ["asgiref (>=3.2)"] dotenv = ["python-dotenv"] +[[package]] +name = "folium" +version = "0.16.0" +description = "Make beautiful maps with Leaflet.js & Python" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "folium-0.16.0-py2.py3-none-any.whl", hash = "sha256:ba72505db18bef995c880da19457d2b10c931db8059af5f6ccec9310d262b584"}, + {file = "folium-0.16.0.tar.gz", hash = "sha256:2585ee9253dc758d3a365534caa6fb5fa0c244646db4dc5819afc67bbd4daabb"}, +] + +[package.dependencies] +branca = ">=0.6.0" +jinja2 = ">=2.9" +numpy = "*" +requests = "*" +xyzservices = "*" + +[package.extras] +testing = ["pytest"] + +[[package]] +name = 
"geopandas" +version = "0.14.3" +description = "Geographic pandas extensions" +category = "main" +optional = false +python-versions = ">=3.9" +files = [ + {file = "geopandas-0.14.3-py3-none-any.whl", hash = "sha256:41b31ad39e21bc9e8c4254f78f8dc4ce3d33d144e22e630a00bb336c83160204"}, + {file = "geopandas-0.14.3.tar.gz", hash = "sha256:748af035d4a068a4ae00cab384acb61d387685c833b0022e0729aa45216b23ac"}, +] + +[package.dependencies] +fiona = ">=1.8.21" +packaging = "*" +pandas = ">=1.4.0" +pyproj = ">=3.3.0" +shapely = ">=1.8.0" + [[package]] name = "identify" version = "2.5.33" @@ -766,6 +926,46 @@ nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" +[[package]] +name = "pyproj" +version = "3.6.1" +description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" +category = "main" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pyproj-3.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab7aa4d9ff3c3acf60d4b285ccec134167a948df02347585fdd934ebad8811b4"}, + {file = "pyproj-3.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4bc0472302919e59114aa140fd7213c2370d848a7249d09704f10f5b062031fe"}, + {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5279586013b8d6582e22b6f9e30c49796966770389a9d5b85e25a4223286cd3f"}, + {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fafd1f3eb421694857f254a9bdbacd1eb22fc6c24ca74b136679f376f97d35"}, + {file = "pyproj-3.6.1-cp310-cp310-win32.whl", hash = "sha256:c41e80ddee130450dcb8829af7118f1ab69eaf8169c4bf0ee8d52b72f098dc2f"}, + {file = "pyproj-3.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:db3aedd458e7f7f21d8176f0a1d924f1ae06d725228302b872885a1c34f3119e"}, + {file = "pyproj-3.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ebfbdbd0936e178091309f6cd4fcb4decd9eab12aa513cdd9add89efa3ec2882"}, + {file = "pyproj-3.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:447db19c7efad70ff161e5e46a54ab9cc2399acebb656b6ccf63e4bc4a04b97a"}, + {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7e13c40183884ec7f94eb8e0f622f08f1d5716150b8d7a134de48c6110fee85"}, + {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65ad699e0c830e2b8565afe42bd58cc972b47d829b2e0e48ad9638386d994915"}, + {file = "pyproj-3.6.1-cp311-cp311-win32.whl", hash = "sha256:8b8acc31fb8702c54625f4d5a2a6543557bec3c28a0ef638778b7ab1d1772132"}, + {file = "pyproj-3.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:38a3361941eb72b82bd9a18f60c78b0df8408416f9340521df442cebfc4306e2"}, + {file = "pyproj-3.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1e9fbaf920f0f9b4ee62aab832be3ae3968f33f24e2e3f7fbb8c6728ef1d9746"}, + {file = "pyproj-3.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d227a865356f225591b6732430b1d1781e946893789a609bb34f59d09b8b0f8"}, + {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83039e5ae04e5afc974f7d25ee0870a80a6bd6b7957c3aca5613ccbe0d3e72bf"}, + {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb059ba3bced6f6725961ba758649261d85ed6ce670d3e3b0a26e81cf1aa8d"}, + {file = "pyproj-3.6.1-cp312-cp312-win32.whl", hash = "sha256:2d6ff73cc6dbbce3766b6c0bce70ce070193105d8de17aa2470009463682a8eb"}, + {file = "pyproj-3.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:7a27151ddad8e1439ba70c9b4b2b617b290c39395fa9ddb7411ebb0eb86d6fb0"}, + {file = "pyproj-3.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ba1f9b03d04d8cab24d6375609070580a26ce76eaed54631f03bab00a9c737b"}, + {file = "pyproj-3.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18faa54a3ca475bfe6255156f2f2874e9a1c8917b0004eee9f664b86ccc513d3"}, + {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:fd43bd9a9b9239805f406fd82ba6b106bf4838d9ef37c167d3ed70383943ade1"}, + {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50100b2726a3ca946906cbaa789dd0749f213abf0cbb877e6de72ca7aa50e1ae"}, + {file = "pyproj-3.6.1-cp39-cp39-win32.whl", hash = "sha256:9274880263256f6292ff644ca92c46d96aa7e57a75c6df3f11d636ce845a1877"}, + {file = "pyproj-3.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:36b64c2cb6ea1cc091f329c5bd34f9c01bb5da8c8e4492c709bda6a09f96808f"}, + {file = "pyproj-3.6.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd93c1a0c6c4aedc77c0fe275a9f2aba4d59b8acf88cebfc19fe3c430cfabf4f"}, + {file = "pyproj-3.6.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6420ea8e7d2a88cb148b124429fba8cd2e0fae700a2d96eab7083c0928a85110"}, + {file = "pyproj-3.6.1.tar.gz", hash = "sha256:44aa7c704c2b7d8fb3d483bbf75af6cb2350d30a63b144279a09b75fead501bf"}, +] + +[package.dependencies] +certifi = "*" + [[package]] name = "pyproject-api" version = "1.6.1" @@ -951,6 +1151,64 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +[[package]] +name = "shapely" +version = "2.0.3" +description = "Manipulation and analysis of geometric objects" +category = "main" +optional = false +python-versions = ">=3.7" 
+files = [ + {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:af7e9abe180b189431b0f490638281b43b84a33a960620e6b2e8d3e3458b61a1"}, + {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98040462b36ced9671e266b95c326b97f41290d9d17504a1ee4dc313a7667b9c"}, + {file = "shapely-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:71eb736ef2843f23473c6e37f6180f90f0a35d740ab284321548edf4e55d9a52"}, + {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:881eb9dbbb4a6419667e91fcb20313bfc1e67f53dbb392c6840ff04793571ed1"}, + {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f10d2ccf0554fc0e39fad5886c839e47e207f99fdf09547bc687a2330efda35b"}, + {file = "shapely-2.0.3-cp310-cp310-win32.whl", hash = "sha256:6dfdc077a6fcaf74d3eab23a1ace5abc50c8bce56ac7747d25eab582c5a2990e"}, + {file = "shapely-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:64c5013dacd2d81b3bb12672098a0b2795c1bf8190cfc2980e380f5ef9d9e4d9"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56cee3e4e8159d6f2ce32e421445b8e23154fd02a0ac271d6a6c0b266a8e3cce"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:619232c8276fded09527d2a9fd91a7885ff95c0ff9ecd5e3cb1e34fbb676e2ae"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2a7d256db6f5b4b407dc0c98dd1b2fcf1c9c5814af9416e5498d0a2e4307a4b"}, + {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45f0c8cd4583647db3216d965d49363e6548c300c23fd7e57ce17a03f824034"}, + {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13cb37d3826972a82748a450328fe02a931dcaed10e69a4d83cc20ba021bc85f"}, + {file = "shapely-2.0.3-cp311-cp311-win32.whl", hash = "sha256:9302d7011e3e376d25acd30d2d9e70d315d93f03cc748784af19b00988fc30b1"}, + {file = 
"shapely-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6b464f2666b13902835f201f50e835f2f153f37741db88f68c7f3b932d3505fa"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e86e7cb8e331a4850e0c2a8b2d66dc08d7a7b301b8d1d34a13060e3a5b4b3b55"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c91981c99ade980fc49e41a544629751a0ccd769f39794ae913e53b07b2f78b9"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd45d456983dc60a42c4db437496d3f08a4201fbf662b69779f535eb969660af"}, + {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:882fb1ffc7577e88c1194f4f1757e277dc484ba096a3b94844319873d14b0f2d"}, + {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9f2d93bff2ea52fa93245798cddb479766a18510ea9b93a4fb9755c79474889"}, + {file = "shapely-2.0.3-cp312-cp312-win32.whl", hash = "sha256:99abad1fd1303b35d991703432c9481e3242b7b3a393c186cfb02373bf604004"}, + {file = "shapely-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:6f555fe3304a1f40398977789bc4fe3c28a11173196df9ece1e15c5bc75a48db"}, + {file = "shapely-2.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a983cc418c1fa160b7d797cfef0e0c9f8c6d5871e83eae2c5793fce6a837fad9"}, + {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18bddb8c327f392189a8d5d6b9a858945722d0bb95ccbd6a077b8e8fc4c7890d"}, + {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:442f4dcf1eb58c5a4e3428d88e988ae153f97ab69a9f24e07bf4af8038536325"}, + {file = "shapely-2.0.3-cp37-cp37m-win32.whl", hash = "sha256:31a40b6e3ab00a4fd3a1d44efb2482278642572b8e0451abdc8e0634b787173e"}, + {file = "shapely-2.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:59b16976c2473fec85ce65cc9239bef97d4205ab3acead4e6cdcc72aee535679"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:705efbce1950a31a55b1daa9c6ae1c34f1296de71ca8427974ec2f27d57554e3"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:601c5c0058a6192df704cb889439f64994708563f57f99574798721e9777a44b"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f24ecbb90a45c962b3b60d8d9a387272ed50dc010bfe605f1d16dfc94772d8a1"}, + {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c2a2989222c6062f7a0656e16276c01bb308bc7e5d999e54bf4e294ce62e76"}, + {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42bceb9bceb3710a774ce04908fda0f28b291323da2688f928b3f213373b5aee"}, + {file = "shapely-2.0.3-cp38-cp38-win32.whl", hash = "sha256:54d925c9a311e4d109ec25f6a54a8bd92cc03481a34ae1a6a92c1fe6729b7e01"}, + {file = "shapely-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:300d203b480a4589adefff4c4af0b13919cd6d760ba3cbb1e56275210f96f654"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:083d026e97b6c1f4a9bd2a9171c7692461092ed5375218170d91705550eecfd5"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:27b6e1910094d93e9627f2664121e0e35613262fc037051680a08270f6058daf"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:71b2de56a9e8c0e5920ae5ddb23b923490557ac50cb0b7fa752761bf4851acde"}, + {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d279e56bbb68d218d63f3efc80c819cedcceef0e64efbf058a1df89dc57201b"}, + {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88566d01a30f0453f7d038db46bc83ce125e38e47c5f6bfd4c9c287010e9bf74"}, + {file = "shapely-2.0.3-cp39-cp39-win32.whl", hash = "sha256:58afbba12c42c6ed44c4270bc0e22f3dadff5656d711b0ad335c315e02d04707"}, + {file = "shapely-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:5026b30433a70911979d390009261b8c4021ff87c7c3cbd825e62bb2ffa181bc"}, + 
{file = "shapely-2.0.3.tar.gz", hash = "sha256:4d65d0aa7910af71efa72fd6447e02a8e5dd44da81a983de9d736d6e6ccbe674"}, +] + +[package.dependencies] +numpy = ">=1.14,<2" + +[package.extras] +docs = ["matplotlib", "numpydoc (>=1.1.0,<1.2.0)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +test = ["pytest", "pytest-cov"] + [[package]] name = "six" version = "1.16.0" @@ -1099,6 +1357,18 @@ MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] +[[package]] +name = "xyzservices" +version = "2024.4.0" +description = "Source of XYZ tiles providers" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "xyzservices-2024.4.0-py3-none-any.whl", hash = "sha256:b83e48c5b776c9969fffcfff57b03d02b1b1cd6607a9d9c4e7f568b01ef47f4c"}, + {file = "xyzservices-2024.4.0.tar.gz", hash = "sha256:6a04f11487a6fb77d92a98984cd107fbd9157fd5e65f929add9c3d6e604ee88c"}, +] + [[package]] name = "zipp" version = "3.18.1" @@ -1118,4 +1388,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "a604d3b769ffc5079bf789d1557a112f77a5fbf91071732ab27de41caf356da8" +content-hash = "1bbdf34eb8993f9ad610eb20a865992d33882e65b06c410b2b93c7d7b2f62551" diff --git a/pyproject.toml b/pyproject.toml index b852b15..198437e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,6 +19,8 @@ python = "^3.10" pandas = "^2.2.1" dash = "^2.16.1" duckdb = "^0.10.1" +geopandas = "^0.14.3" +folium = "^0.16.0" [tool.poetry.group.dev.dependencies] pre-commit = "^2.20.0" From 2cef0e04f2eaa328b943872d1904804c37811769 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Wed, 10 Apr 2024 21:07:04 +0200 Subject: [PATCH 015/147] ajout de containers pour esthetique --- dashboards/app/pages/data.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 274ef55..9f038fb 100644 --- 
a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -190,8 +190,11 @@ def load_df_other(): cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux + l2_col1, l2_col2 = st.columns(2) - with l2_col1: + cell4 = l2_col1.container(border=True) + cell5 = l2_col2.container(border=True) + with cell4: # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance fig = px.pie( @@ -211,7 +214,7 @@ def load_df_other(): # Affichage du graphique st.plotly_chart(fig, use_container_width=True) - with l2_col2: + with cell5: # Création du graphique en barres avec Plotly Express fig2 = px.bar( df_totals_sorted, @@ -272,7 +275,8 @@ def load_df_other(): fig3.update_layout(yaxis_title="% du volume collecté", xaxis_title=None) # Afficher le graphique - st.plotly_chart(fig3, use_container_width=True) + with st.container(border=True): + st.plotly_chart(fig3, use_container_width=True) st.divider() @@ -378,7 +382,8 @@ def load_df_other(): # Amélioration de l'affichage fig4.update_traces(textinfo="percent") fig4.update_layout(autosize=True, legend_title_text="Matériau") - st.plotly_chart(fig4, use_container_width=True) + with st.container(border=True): + st.plotly_chart(fig4, use_container_width=True) else: st.write("Aucune donnée à afficher pour les filtres sélectionnés.") @@ -489,14 +494,14 @@ def load_df_other(): # use_container_width=True, # ) - with st.container(): + with st.container(border=True): col1, col2 = st.columns([3, 1]) with col1: st.plotly_chart(fig, use_container_width=True) with col2: - st.write("Métriques des déchets") # Titre pour les cartes + st.write("Nombre ramassé pour chaque déchet") for index, row in df_top10_dechets.iterrows(): value = f"{row['nb_dechet']:,.0f}".replace(",", " ") st.metric(label=row["categorie"], value=value) @@ -686,8 +691,8 @@ def load_df_other(): fig_secteur.update_layout( width=800, height=500, uniformtext_minsize=8, 
uniformtext_mode="hide" ) - - st.plotly_chart(fig_secteur, use_container_width=False) + with st.container(border=True): + st.plotly_chart(fig_secteur, use_container_width=True) l1_col1, l1_col2 = st.columns(2) cell1 = l1_col1.container(border=True) @@ -723,4 +728,6 @@ def load_df_other(): fig_marque.update_layout( width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" ) - st.plotly_chart(fig_marque, use_container_width=False) + + with st.container(border=True): + st.plotly_chart(fig_marque, use_container_width=True) From b87ee5fcbfcc80772620db1ec8034dadac726b71 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Thu, 11 Apr 2024 11:25:28 +0200 Subject: [PATCH 016/147] =?UTF-8?q?tg=20-=20filtres=20remont=C3=A9s=20sur?= =?UTF-8?q?=20home.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 77 ++++++++++++++++++++- dashboards/app/pages/data.py | 128 +++++++++++++++++------------------ 2 files changed, 138 insertions(+), 67 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 3fd4b7b..713eed5 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,3 +1,4 @@ +import pandas as pd import streamlit as st st.markdown( @@ -8,4 +9,78 @@ ) st.markdown("""# À propos""") -st.image("media/ZDS-logo.png") + + +# Chargement des données géographiques pour le filtre : une seule fois à l'arrivée +@st.cache_data +def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + 
df["commune"] + return df + + +# Appel des fonctions pour charger les données + +df_other = load_df_other() + + +# Création du filtre par niveau géographique : correspondance labels et variables du dataframe +niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", +} + +# 1ère étape : sélection du niveau administratif concerné (région, dép...) +# Si déjà saisi précédemment, initialiser le filtre avec la valeur +index_admin = st.session_state.get("niveau_admin", None) +select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=1, + key="niveau_admin", +) + +if select_niveauadmin is not None: + # Extraction de la liste depuis le dataframe + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + # 2ème filtre : sélection de la collectivité concernée + index_collec = st.session_state.get("collectivite", None) + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=2, + key="collectivite", + ) +else: + st.caption( + "Choisissez un niveau administratif pour afficher la liste des collectivités.", + ) + +if st.button("Enregistrer la sélection"): + # Retourner le filtre validé et le nombre de relevés disponibles + filtre_niveau = st.session_state["niveau_admin"] + filtre_collectivite = st.session_state["collectivite"] + st.write(f"Vous avez sélectionné : {filtre_niveau} {filtre_collectivite}.") + + # Enregistrer le DataFrame dans un "session state" pour conserver le filtre dans les onglets + colonne_filtre = niveaux_admin_dict[filtre_niveau] + st.session_state["df_other"] = df_other[ + df_other[colonne_filtre] == filtre_collectivite + ] + + nb_releves = len(st.session_state["df_other"]) + st.write( + f"{nb_releves} relevés de collecte disponibles \ + pour l'analyse sur votre territoire.", + ) diff --git a/dashboards/app/pages/data.py 
b/dashboards/app/pages/data.py index 9f038fb..92dc529 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -17,6 +17,9 @@ # Session state session_state = st.session_state +# Récupérer les filtres géographiques s'ils ont été fixés +filtre_niveau = st.session_state.get("niveau_admin", "") +filtre_collectivite = st.session_state.get("collectivite", "") # Titre de l'onglet st.markdown( @@ -24,7 +27,7 @@ Visualisez les impacts sur les milieux naturels et secteurs/filières/marques à l’origine de cette pollution """ ) - +st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") # Définition d'une fonction pour charger les données du nombre de déchets @st.cache_data @@ -48,30 +51,28 @@ def load_df_nb_dechet(): # Définition d'une fonction pour charger les autres données @st.cache_data def load_df_other(): - return pd.read_csv( + df = pd.read_csv( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" "sation/data/data_zds_enriched.csv" ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + + return df + # Appel des fonctions pour charger les données df_nb_dechet = load_df_nb_dechet() -df_other = load_df_other() df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() - -# Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) -df_other["DEP_CODE_NOM"] = df_other["DEP"] + " - " + df_other["DEPARTEMENT"] -df_other["COMMUNE_CODE_NOM"] = df_other["INSEE_COM"] + " - " + df_other["commune"] - - -# Création du filtre dynamique par niveau géographique -# df_other = df_other.rename(columns={"REGION": 
"Région","DEP_CODE_NOM":"Département","LIBEPCI":"EPCI","COMMUNE_CODE_NOM":"Commune" }) -# niveaux_geo = ["Région", "Département", "EPCI", "Commune"] -niveaux_geo = ["REGION", "DEP_CODE_NOM", "LIBEPCI", "COMMUNE_CODE_NOM"] -dynamic_filters = DynamicFilters(df_other, filters=niveaux_geo) -df_other_filtre = dynamic_filters.filter_df() +# Appeler le dataframe filtré depuis le session state +if "df_other" in st.session_state: + df_other = st.session_state["df_other"].copy() +else: + df_other = load_df_other() # 3 Onglets : Matériaux, Top déchets, Filières et marques @@ -112,7 +113,7 @@ def load_df_other(): cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] # Copie des données pour transfo - df_volume = df_other_filtre.copy() + df_volume = df_other.copy() # Calcul des indicateurs clés de haut de tableau avant transformation volume_total = df_volume["VOLUME_TOTAL"].sum() @@ -161,12 +162,6 @@ def load_df_other(): # Popover cell # with st.popover("Filtres géographiques", help = "Sélectionnez le niveau géographique souhaité pour afficher les indicateurs") : - dynamic_filters.display_filters(location="sidebar") - # filtre_region = st.selectbox( - # "Région :", collectivites_dict["Région"], - # index=None - # ) - # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -286,14 +281,12 @@ def load_df_other(): # Étape 1: Création des filtres selected_annee = st.selectbox( "Choisir une année:", - options=["Aucune sélection"] + list(df_other_filtre["ANNEE"].unique()), + options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), ) if selected_annee != "Aucune sélection": - filtered_data_milieu = df_other_filtre[ - df_other_filtre["ANNEE"] == selected_annee - ] + filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee] else: - filtered_data_milieu = df_other_filtre + filtered_data_milieu = df_other selected_type_milieu = st.selectbox( "Choisir un type de milieu:", @@ -318,22 +311,22 @@ def 
load_df_other(): and selected_type_milieu == "Aucune sélection" and selected_type_lieu == "Aucune sélection" ): - df_filtered = df_other_filtre + df_filtered = df_other elif ( selected_type_milieu == "Aucune sélection" and selected_type_lieu == "Aucune sélection" ): - df_filtered = df_other_filtre[df_other_filtre["ANNEE"] == selected_annee] + df_filtered = df_other[df_other["ANNEE"] == selected_annee] elif selected_type_lieu == "Aucune sélection": - df_filtered = df_other_filtre[ - (df_other_filtre["ANNEE"] == selected_annee) - & (df_other_filtre["TYPE_MILIEU"] == selected_type_milieu) + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) ] else: - df_filtered = df_other_filtre[ - (df_other_filtre["ANNEE"] == selected_annee) - & (df_other_filtre["TYPE_MILIEU"] == selected_type_milieu) - & (df_other_filtre["TYPE_LIEU"] == selected_type_lieu) + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + & (df_other["TYPE_LIEU"] == selected_type_lieu) ] # Étape 3: Preparation dataframe pour graphe @@ -387,6 +380,25 @@ def load_df_other(): else: st.write("Aucune donnée à afficher pour les filtres sélectionnés.") + # 2ème option de graphique, à choisir + if not df_filtered.empty: + fig5 = px.treemap( + df_totals_sorted2, + path=["Matériau"], + values="Volume", + title="2ème option : treemap de répartition des matériaux en volume", + color="Matériau", + color_discrete_map=colors_map, + ) + fig5.update_layout( + margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 + ) + fig5.update_traces(textinfo="label+value") + with st.container(border=True): + st.plotly_chart(fig5, use_container_width=True) + else: + st.write("Aucune donnée à afficher pour les filtres sélectionnés.") + # Onglet 2 : Top Déchets with tab2: @@ -420,7 +432,7 @@ def load_df_other(): # Préparation des datas pour l'onglet 2 df_top = df_nb_dechet.copy() - df_top_data_releves 
= df_other_filtre.copy() + df_top_data_releves = df_other.copy() # Filtration des données pour nb_dechets df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement @@ -482,18 +494,6 @@ def load_df_other(): # ) # ).to_df() - # st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") - - # st.altair_chart( - # alt.Chart(res_aggCategory_filGroup) - # .mark_bar() - # .encode( - # x=alt.X("categorie", sort=None, title=""), - # y=alt.Y("total_dechet", title="Total de déchet"), - # ), - # use_container_width=True, - # ) - with st.container(border=True): col1, col2 = st.columns([3, 1]) @@ -567,20 +567,18 @@ def load_df_other(): # Préparation des données df_dechet_copy = df_nb_dechet.copy() - df_filtre_copy = df_other_filtre.copy() + df_filtre_copy = df_other.copy() # Étape 1: Création des filtres selected_annee_onglet_3 = st.selectbox( "Choisir une année:", - options=["Aucune sélection"] + list(df_other_filtre["ANNEE"].unique()), + options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), key="année_select", ) if selected_annee_onglet_3 != "Aucune sélection": - filtered_data_milieu = df_other_filtre[ - df_other_filtre["ANNEE"] == selected_annee_onglet_3 - ] + filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee_onglet_3] else: - filtered_data_milieu = df_other_filtre + filtered_data_milieu = df_other selected_type_milieu_onglet_3 = st.selectbox( "Choisir un type de milieu:", @@ -607,24 +605,22 @@ def load_df_other(): and selected_type_milieu_onglet_3 == "Aucune sélection" and selected_type_lieu_onglet_3 == "Aucune sélection" ): - df_filtered = df_other_filtre + df_filtered = df_other elif ( selected_type_milieu_onglet_3 == "Aucune sélection" and selected_type_lieu_onglet_3 == "Aucune sélection" ): - df_filtered = df_other_filtre[ - df_other_filtre["ANNEE"] == selected_annee_onglet_3 - ] + df_filtered = df_other[df_other["ANNEE"] == 
selected_annee_onglet_3] elif selected_type_lieu_onglet_3 == "Aucune sélection": - df_filtered = df_other_filtre[ - (df_other_filtre["ANNEE"] == selected_annee_onglet_3) - & (df_other_filtre["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) ] else: - df_filtered = df_other_filtre[ - (df_other_filtre["ANNEE"] == selected_annee_onglet_3) - & (df_other_filtre["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - & (df_other_filtre["TYPE_LIEU"] == selected_type_lieu_onglet_3) + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) ] # Filtration des données pour nb_dechets From 6eb76deb8cf954273d306544456ff1b861ebec98 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 12 Apr 2024 05:33:37 -0400 Subject: [PATCH 017/147] =?UTF-8?q?[kb]=20=F0=9F=94=92=EF=B8=8F=20Add=20au?= =?UTF-8?q?thentication?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 175 +++-- dashboards/app/pages/data.py | 1289 ++++++++++++++++--------------- dashboards/app/requirements.txt | 1 + 3 files changed, 756 insertions(+), 709 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 713eed5..45f480f 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,5 +1,10 @@ +from pathlib import Path + import pandas as pd import streamlit as st +import streamlit_authenticator as stauth +import yaml +from yaml.loader import SafeLoader st.markdown( """ @@ -8,79 +13,109 @@ """, ) -st.markdown("""# À propos""") +# Login +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + 
config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) -# Chargement des données géographiques pour le filtre : une seule fois à l'arrivée -@st.cache_data -def load_df_other() -> pd.DataFrame: - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv", - ) - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE - # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - return df - - -# Appel des fonctions pour charger les données - -df_other = load_df_other() - - -# Création du filtre par niveau géographique : correspondance labels et variables du dataframe -niveaux_admin_dict = { - "Région": "REGION", - "Département": "DEP_CODE_NOM", - "EPCI": "LIBEPCI", - "Commune": "COMMUNE_CODE_NOM", -} - -# 1ère étape : sélection du niveau administratif concerné (région, dép...) 
-# Si déjà saisi précédemment, initialiser le filtre avec la valeur -index_admin = st.session_state.get("niveau_admin", None) -select_niveauadmin = st.selectbox( - "Niveau administratif : ", - niveaux_admin_dict.keys(), - index=1, - key="niveau_admin", +authenticator.login( + fields={ + "Form name": "Connexion", + "Username": "Identifiant", + "Password": "Mot de passe", + "Login": "Connexion", + }, ) -if select_niveauadmin is not None: - # Extraction de la liste depuis le dataframe - liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] - liste_collectivites = liste_collectivites.sort_values().unique() - - # 2ème filtre : sélection de la collectivité concernée - index_collec = st.session_state.get("collectivite", None) - select_collectivite = st.selectbox( - "Collectivité : ", - liste_collectivites, - index=2, - key="collectivite", - ) -else: - st.caption( - "Choisissez un niveau administratif pour afficher la liste des collectivités.", - ) +if st.session_state["authentication_status"]: + st.markdown("""# À propos""") + + # Chargement des données géographiques pour le filtre : une seule fois à l'arrivée + @st.cache_data + def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + # Appel des fonctions pour charger les données + + df_other = load_df_other() -if st.button("Enregistrer la sélection"): - # Retourner le filtre validé et le nombre de relevés disponibles - filtre_niveau = st.session_state["niveau_admin"] - filtre_collectivite = st.session_state["collectivite"] - 
st.write(f"Vous avez sélectionné : {filtre_niveau} {filtre_collectivite}.") - - # Enregistrer le DataFrame dans un "session state" pour conserver le filtre dans les onglets - colonne_filtre = niveaux_admin_dict[filtre_niveau] - st.session_state["df_other"] = df_other[ - df_other[colonne_filtre] == filtre_collectivite - ] - - nb_releves = len(st.session_state["df_other"]) - st.write( - f"{nb_releves} relevés de collecte disponibles \ - pour l'analyse sur votre territoire.", + # TODO : Raccourcir commentaire + # Création du filtre par niveau géographique + # : correspondance labels et variables du dataframe + niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", + } + + # 1ère étape : sélection du niveau administratif concerné (région, dép...) + # Si déjà saisi précédemment, initialiser le filtre avec la valeur + index_admin = st.session_state.get("niveau_admin", None) + select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=1, + key="niveau_admin", ) + + if select_niveauadmin is not None: + # Extraction de la liste depuis le dataframe + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + # 2ème filtre : sélection de la collectivité concernée + index_collec = st.session_state.get("collectivite", None) + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=2, + key="collectivite", + ) + else: + st.caption( + "Choisissez un niveau administratif pour afficher la liste des collectivités.", + ) + + if st.button("Enregistrer la sélection"): + # Retourner le filtre validé et le nombre de relevés disponibles + filtre_niveau = st.session_state["niveau_admin"] + filtre_collectivite = st.session_state["collectivite"] + st.write(f"Vous avez sélectionné : {filtre_niveau} {filtre_collectivite}.") + + # TODO : Raccourcir commentaire + # 
Enregistrer le DataFrame dans un + # "session state" pour conserver le filtre dans les onglets + colonne_filtre = niveaux_admin_dict[filtre_niveau] + st.session_state["df_other"] = df_other[ + df_other[colonne_filtre] == filtre_collectivite + ] + + nb_releves = len(st.session_state["df_other"]) + st.write( + f"{nb_releves} relevés de collecte disponibles \ + pour l'analyse sur votre territoire.", + ) + + authenticator.logout() +elif st.session_state["authentication_status"] is False: + st.error("Mauvais identifiants ou mot de passe.") +elif st.session_state["authentication_status"] is None: + st.warning("Veuillez entrer votre identifiant et mot de passe") diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 92dc529..6af96b0 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -27,703 +27,714 @@ Visualisez les impacts sur les milieux naturels et secteurs/filières/marques à l’origine de cette pollution """ ) -st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") - -# Définition d'une fonction pour charger les données du nombre de déchets -@st.cache_data -def load_df_dict_corr_dechet_materiau(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" - "chet_groupe_materiau.csv" - ) - - -@st.cache_data -def load_df_nb_dechet(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" - ) - - -# Définition d'une fonction pour charger les autres données -@st.cache_data -def load_df_other(): - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" - ) - - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le 
numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - - return df - - -# Appel des fonctions pour charger les données -df_nb_dechet = load_df_nb_dechet() -df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() - -# Appeler le dataframe filtré depuis le session state -if "df_other" in st.session_state: - df_other = st.session_state["df_other"].copy() -else: - df_other = load_df_other() - - -# 3 Onglets : Matériaux, Top déchets, Filières et marques -tab1, tab2, tab3 = st.tabs( - [ - "Matériaux :wood:", - "Top Déchets :wastebasket:", - "Secteurs et marques :womans_clothes:", - ] -) - - -milieu_lieu_dict = ( - df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] - .unique() - .apply(lambda x: x.tolist()) - .to_dict() -) - -annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - -# Onglet 1 : Matériaux -with tab1: - - # Transformation du dataframe pour les graphiques - # Variables à conserver en ligne - cols_identifiers = [ - "ANNEE", - "TYPE_MILIEU", - "INSEE_COM", - "DEP", - "REG", - "EPCI", - "BV2022", - ] - - # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau - cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] - - # Copie des données pour transfo - df_volume = df_other.copy() - - # Calcul des indicateurs clés de haut de tableau avant transformation - volume_total = df_volume["VOLUME_TOTAL"].sum() - poids_total = df_volume["POIDS_TOTAL"].sum() - volume_total_categorise = df_volume[cols_volume].sum().sum() - pct_volume_categorise = volume_total_categorise / volume_total - nb_collectes = len(df_volume) - - # estimation du poids categorisée en utilisant pct_volume_categorise - poids_total_categorise = round(poids_total * pct_volume_categorise) - - # Dépivotage du tableau pour avoir une base de données exploitable - df_volume = 
df_volume.melt( - id_vars=cols_identifiers, - value_vars=cols_volume, - var_name="Matériau", - value_name="Volume", - ) - - # Nettoyer le nom du Type déchet pour le rendre plus lisible - df_volume["Matériau"] = ( - df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() - ) - - # Grouper par type de matériau pour les visualisations - df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)["Volume"].sum() - df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) - - # Charte graphique MERTERRE : - colors_map = { - "Textile": "#C384B1", - "Papier": "#CAA674", - "Metal": "#A0A0A0", - "Verre": "#3DCE89", - "Autre": "#F3B900", - "Plastique": "#48BEF0", - "Caoutchouc": "#364E74", - "Bois": "#673C11", - "Papier/Carton": "#CAA674", - "Métal": "#A0A0A0", - "Verre/Céramique": "#3DCE89", - "Autre": "#F3B900", - } - - # Ligne 0 : Filtres géographiques - # Popover cell - # with st.popover("Filtres géographiques", help = "Sélectionnez le niveau géographique souhaité pour afficher les indicateurs") : - - # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - volume_total = f"{volume_total:,.0f}".replace(",", " ") - cell1.metric("Volume de déchets collectés", f"{volume_total} litres") - - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - poids_total = f"{poids_total:,.0f}".replace(",", " ") - - cell2.metric("Poids total collecté", f"{poids_total} kg") - - # 3ème métrique : nombre de relevés - cell3 = l1_col3.container(border=True) - nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") - cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") - - # Ligne 2 : 2 graphiques en ligne : donut et bar 
chart matériaux - - l2_col1, l2_col2 = st.columns(2) - cell4 = l2_col1.container(border=True) - cell5 = l2_col2.container(border=True) - with cell4: - - # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance - fig = px.pie( - df_totals_sorted, - values="Volume", - names="Matériau", - title="Répartition des matériaux en volume", - hole=0.4, - color="Matériau", - color_discrete_map=colors_map, +if st.session_state["authentication_status"]: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") + + # Définition d'une fonction pour charger les données du nombre de déchets + @st.cache_data + def load_df_dict_corr_dechet_materiau(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" + "chet_groupe_materiau.csv" ) - # Amélioration de l'affichage - fig.update_traces(textinfo="percent") - fig.update_layout(autosize=True, legend_title_text="Matériau") - - # Affichage du graphique - st.plotly_chart(fig, use_container_width=True) - - with cell5: - # Création du graphique en barres avec Plotly Express - fig2 = px.bar( - df_totals_sorted, - x="Matériau", - y="Volume", - text="Volume", - title="Volume total par materiau (en litres)", - color="Matériau", - color_discrete_map=colors_map, + @st.cache_data + def load_df_nb_dechet(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv" ) - # Amélioration du graphique - fig2.update_traces(texttemplate="%{text:.2s}", textposition="outside") - fig2.update_layout( - autosize=True, - uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=90, - showlegend=False, + # Définition d'une fonction pour charger les autres données + @st.cache_data + def load_df_other(): + df = pd.read_csv( + 
"https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv" ) - # Affichage du graphique - st.plotly_chart(fig2, use_container_width=True) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - st.write("") - st.caption( - f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." - ) - - st.divider() - - # Ligne 3 : Graphe par milieu de collecte - st.write("**Volume collecté par matériau en fonction du milieu de collecte**") + return df - # Part de volume collecté par type de milieu + # Appel des fonctions pour charger les données + df_nb_dechet = load_df_nb_dechet() + df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() - # Grouper par année et type de matériau - df_typemilieu = df_volume.groupby(["TYPE_MILIEU", "Matériau"], as_index=False)[ - "Volume" - ].sum() - df_typemilieu = df_typemilieu.sort_values( - ["TYPE_MILIEU", "Volume"], ascending=False + # Appeler le dataframe filtré depuis le session state + if "df_other" in st.session_state: + df_other = st.session_state["df_other"].copy() + else: + df_other = load_df_other() + + # 3 Onglets : Matériaux, Top déchets, Filières et marques + tab1, tab2, tab3 = st.tabs( + [ + "Matériaux :wood:", + "Top Déchets :wastebasket:", + "Secteurs et marques :womans_clothes:", + ] ) - # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau - fig3 = px.histogram( - df_typemilieu, - x="TYPE_MILIEU", - y="Volume", - color="Matériau", - barnorm="percent", - title="Répartition des matériaux en fonction du milieu de collecte", - 
text_auto=False, - color_discrete_map=colors_map, + milieu_lieu_dict = ( + df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] + .unique() + .apply(lambda x: x.tolist()) + .to_dict() ) - fig3.update_layout(bargap=0.2) - fig3.update_layout(yaxis_title="% du volume collecté", xaxis_title=None) - - # Afficher le graphique - with st.container(border=True): - st.plotly_chart(fig3, use_container_width=True) - - st.divider() - - # Ligne 3 : Graphe par milieu , lieu et année - st.write("**Détail par milieu, lieu ou année**") - - # Étape 1: Création des filtres - selected_annee = st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), - ) - if selected_annee != "Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee] - else: - filtered_data_milieu = df_other + annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - selected_type_milieu = st.selectbox( - "Choisir un type de milieu:", - options=["Aucune sélection"] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), - ) + # Onglet 1 : Matériaux + with tab1: - if selected_type_milieu != "Aucune sélection": - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu + # Transformation du dataframe pour les graphiques + # Variables à conserver en ligne + cols_identifiers = [ + "ANNEE", + "TYPE_MILIEU", + "INSEE_COM", + "DEP", + "REG", + "EPCI", + "BV2022", ] - else: - filtered_data_lieu = filtered_data_milieu - selected_type_lieu = st.selectbox( - "Choisir un type de lieu:", - options=["Aucune sélection"] + list(filtered_data_lieu["TYPE_LIEU"].unique()), - ) + # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau + cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] - if ( - selected_annee == "Aucune sélection" - and selected_type_milieu == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - ): - df_filtered = 
df_other - elif ( - selected_type_milieu == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee] - elif selected_type_lieu == "Aucune sélection": - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - ] - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - & (df_other["TYPE_LIEU"] == selected_type_lieu) - ] + # Copie des données pour transfo + df_volume = df_other.copy() - # Étape 3: Preparation dataframe pour graphe - # Copie des données pour transfo - df_volume2 = df_filtered.copy() - - # Calcul des indicateurs clés de haut de tableau avant transformation - volume2_total = df_volume2["VOLUME_TOTAL"].sum() - poids2_total = df_volume2["POIDS_TOTAL"].sum() - volume2_total_categorise = df_volume2[cols_volume].sum().sum() - pct_volume2_categorise = volume2_total_categorise / volume2_total - nb_collectes2 = len(df_volume2) - - # estimation du poids categorisée en utilisant pct_volume_categorise - poids2_total_categorise = round(poids2_total * pct_volume2_categorise) - - # Dépivotage du tableau pour avoir une base de données exploitable - df_volume2 = df_volume2.melt( - id_vars=cols_identifiers, - value_vars=cols_volume, - var_name="Matériau", - value_name="Volume", - ) + # Calcul des indicateurs clés de haut de tableau avant transformation + volume_total = df_volume["VOLUME_TOTAL"].sum() + poids_total = df_volume["POIDS_TOTAL"].sum() + volume_total_categorise = df_volume[cols_volume].sum().sum() + pct_volume_categorise = volume_total_categorise / volume_total + nb_collectes = len(df_volume) - # Nettoyer le nom du Type déchet pour le rendre plus lisible - df_volume2["Matériau"] = ( - df_volume2["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() - ) + # estimation du poids categorisée en utilisant pct_volume_categorise + poids_total_categorise = 
round(poids_total * pct_volume_categorise) - # Grouper par type de matériau pour les visualisations - df_totals_sorted2 = df_volume2.groupby(["Matériau"], as_index=False)["Volume"].sum() - df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) - - # Étape 4: Création du Graphique - if not df_filtered.empty: - fig4 = px.pie( - df_totals_sorted2, - values="Volume", - names="Matériau", - title="Répartition des matériaux en volume", - hole=0.4, - color="Matériau", - color_discrete_map=colors_map, + # Dépivotage du tableau pour avoir une base de données exploitable + df_volume = df_volume.melt( + id_vars=cols_identifiers, + value_vars=cols_volume, + var_name="Matériau", + value_name="Volume", ) - # Amélioration de l'affichage - fig4.update_traces(textinfo="percent") - fig4.update_layout(autosize=True, legend_title_text="Matériau") - with st.container(border=True): - st.plotly_chart(fig4, use_container_width=True) - else: - st.write("Aucune donnée à afficher pour les filtres sélectionnés.") - - # 2ème option de graphique, à choisir - if not df_filtered.empty: - fig5 = px.treemap( - df_totals_sorted2, - path=["Matériau"], - values="Volume", - title="2ème option : treemap de répartition des matériaux en volume", - color="Matériau", - color_discrete_map=colors_map, + # Nettoyer le nom du Type déchet pour le rendre plus lisible + df_volume["Matériau"] = ( + df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() ) - fig5.update_layout( - margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 - ) - fig5.update_traces(textinfo="label+value") - with st.container(border=True): - st.plotly_chart(fig5, use_container_width=True) - else: - st.write("Aucune donnée à afficher pour les filtres sélectionnés.") - - -# Onglet 2 : Top Déchets -with tab2: - - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque 
colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") - cell1.metric("Volume de déchets catégorisés", f"{volume_total_categorise} litres") - - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - poids_total_categorise = f"{poids_total_categorise:,.0f}".replace(",", " ") - # poids_total = f"{poids_total:,.0f}".replace(",", " ") - cell2.metric( - "Poids estimé de déchets categorisés", - f"{poids_total_categorise} kg", - ) - - # 3ème métrique : nombre de relevés - cell3 = l1_col3.container(border=True) - # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") - cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") - - # Ligne 2 : graphique top déchets - - # Préparation des datas pour l'onglet 2 - df_top = df_nb_dechet.copy() - - df_top_data_releves = df_other.copy() - # Filtration des données pour nb_dechets - df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") - # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement - df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] - # Group by 'categorie', sum 'nb_dechet', et top 10 - df_top10_dechets = ( - df_dechets_groupe.groupby("categorie") - .agg({"nb_dechet": "sum"}) - .sort_values(by="nb_dechet", ascending=False) - .head(10) - ) - # recuperation de ces 10 dechets dans une liste pour filtration bubble map - noms_top10_dechets = df_top10_dechets.index.tolist() - # Preparation des datas pour l'onglet 3# ajout de la colonne materiau - df_top10_dechets = df_top10_dechets.merge( - df_dict_corr_dechet_materiau, on="categorie", how="left" - ) - # Preparation de la figure barplot - df_top10_dechets.reset_index(inplace=True) - # Création du graphique en barres avec Plotly Express - fig = px.bar( - df_top10_dechets, - x="categorie", 
- y="nb_dechet", - labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, - title="Top 10 dechets ramassés", - color="Materiau", - color_discrete_map=colors_map, - category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, - ) - fig.update_layout(yaxis_type="log") - # Amélioration du visuel du graphique - fig.update_traces( - # texttemplate="%{text:.2f}", - textposition="outside" - ) - fig.update_layout( - width=1400, - height=900, - uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=90, - ) - # Suppression de la colonne categorie - del df_top10_dechets["Materiau"] - - # st.markdown( - # """## Quels sont les types de déchets les plus présents sur votre territoire ? - # """ - # ) - # res_aggCategory_filGroup = duckdb.query( - # ( - # "SELECT categorie, sum(nb_dechet) AS total_dechet " - # "FROM df_nb_dechet " - # "WHERE type_regroupement = 'GROUPE' " - # "GROUP BY categorie " - # "HAVING sum(nb_dechet) > 10000 " - # "ORDER BY total_dechet DESC;" - # ) - # ).to_df() - - with st.container(border=True): - col1, col2 = st.columns([3, 1]) - - with col1: + # Grouper par type de matériau pour les visualisations + df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)[ + "Volume" + ].sum() + df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) + + # Charte graphique MERTERRE : + colors_map = { + "Textile": "#C384B1", + "Papier": "#CAA674", + "Metal": "#A0A0A0", + "Verre": "#3DCE89", + "Autre": "#F3B900", + "Plastique": "#48BEF0", + "Caoutchouc": "#364E74", + "Bois": "#673C11", + "Papier/Carton": "#CAA674", + "Métal": "#A0A0A0", + "Verre/Céramique": "#3DCE89", + "Autre": "#F3B900", + } + + # Ligne 0 : Filtres géographiques + # Popover cell + # with st.popover("Filtres géographiques", help = "Sélectionnez le niveau géographique souhaité pour afficher les indicateurs") : + + # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + + # Pour avoir 3 
cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + volume_total = f"{volume_total:,.0f}".replace(",", " ") + cell1.metric("Volume de déchets collectés", f"{volume_total} litres") + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + poids_total = f"{poids_total:,.0f}".replace(",", " ") + + cell2.metric("Poids total collecté", f"{poids_total} kg") + + # 3ème métrique : nombre de relevés + cell3 = l1_col3.container(border=True) + nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") + cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + + # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux + + l2_col1, l2_col2 = st.columns(2) + cell4 = l2_col1.container(border=True) + cell5 = l2_col2.container(border=True) + with cell4: + + # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance + fig = px.pie( + df_totals_sorted, + values="Volume", + names="Matériau", + title="Répartition des matériaux en volume", + hole=0.4, + color="Matériau", + color_discrete_map=colors_map, + ) + + # Amélioration de l'affichage + fig.update_traces(textinfo="percent") + fig.update_layout(autosize=True, legend_title_text="Matériau") + + # Affichage du graphique st.plotly_chart(fig, use_container_width=True) - with col2: - st.write("Nombre ramassé pour chaque déchet") - for index, row in df_top10_dechets.iterrows(): - value = f"{row['nb_dechet']:,.0f}".replace(",", " ") - st.metric(label=row["categorie"], value=value) - - with st.container(): - # Ajout de la selectbox - selected_dechet = st.selectbox( - "Choisir un type de déchet :", noms_top10_dechets, index=0 + with cell5: + # Création du graphique en barres avec Plotly Express + fig2 = px.bar( + df_totals_sorted, + x="Matériau", + y="Volume", + text="Volume", + 
title="Volume total par materiau (en litres)", + color="Matériau", + color_discrete_map=colors_map, + ) + + # Amélioration du graphique + fig2.update_traces(texttemplate="%{text:.2s}", textposition="outside") + fig2.update_layout( + autosize=True, + uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=90, + showlegend=False, + ) + + # Affichage du graphique + st.plotly_chart(fig2, use_container_width=True) + + st.write("") + st.caption( + f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." ) - # Filtration sur le dechet top 10 sélectionné - df_top_map = df_top[df_top["categorie"] == selected_dechet] + st.divider() - # Création du DataFrame de travail pour la carte - df_map_data = pd.merge( - df_top_map, df_top_data_releves, on="ID_RELEVE", how="inner" - ) + # Ligne 3 : Graphe par milieu de collecte + st.write("**Volume collecté par matériau en fonction du milieu de collecte**") - # Création de la carte centrée autour d'une localisation - # Calcul des limites à partir de vos données - min_lat = df_map_data["LIEU_COORD_GPS_Y"].min() - max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() - min_lon = df_map_data["LIEU_COORD_GPS_X"].min() - max_lon = df_map_data["LIEU_COORD_GPS_X"].max() + # Part de volume collecté par type de milieu - map_paca = folium.Map( - location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], - zoom_start=8, - tiles="OpenStreetMap", + # Grouper par année et type de matériau + df_typemilieu = df_volume.groupby(["TYPE_MILIEU", "Matériau"], as_index=False)[ + "Volume" + ].sum() + df_typemilieu = df_typemilieu.sort_values( + ["TYPE_MILIEU", "Volume"], ascending=False ) - # Facteur de normalisation pour ajuster la taille des bulles - normalisation_facteur = 1000 - - for index, row in df_map_data.iterrows(): - # Application de la normalisation - radius = row["nb_dechet"] / normalisation_facteur + # Graphique 
à barre empilées du pourcentage de volume collecté par an et type de matériau + fig3 = px.histogram( + df_typemilieu, + x="TYPE_MILIEU", + y="Volume", + color="Matériau", + barnorm="percent", + title="Répartition des matériaux en fonction du milieu de collecte", + text_auto=False, + color_discrete_map=colors_map, + ) - # Application d'une limite minimale pour le rayon si nécessaire - radius = max(radius, 1) + fig3.update_layout(bargap=0.2) + fig3.update_layout(yaxis_title="% du volume collecté", xaxis_title=None) - folium.CircleMarker( - location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), - radius=radius, # Utilisation du rayon ajusté - popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", - color="#3186cc", - fill=True, - fill_color="#3186cc", - ).add_to(map_paca) + # Afficher le graphique + with st.container(border=True): + st.plotly_chart(fig3, use_container_width=True) - # Affichage de la carte Folium dans Streamlit - st_folium = st.components.v1.html - st_folium( - folium.Figure().add_child(map_paca).render(), # , width=1400 - height=1000, - ) + st.divider() + # Ligne 3 : Graphe par milieu , lieu et année + st.write("**Détail par milieu, lieu ou année**") -# Onglet 3 : Secteurs et marques -with tab3: - st.write("") + # Étape 1: Création des filtres + selected_annee = st.selectbox( + "Choisir une année:", + options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), + ) + if selected_annee != "Aucune sélection": + filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee] + else: + filtered_data_milieu = df_other + + selected_type_milieu = st.selectbox( + "Choisir un type de milieu:", + options=["Aucune sélection"] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + ) - # Préparation des données - df_dechet_copy = df_nb_dechet.copy() + if selected_type_milieu != "Aucune sélection": + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == 
selected_type_milieu + ] + else: + filtered_data_lieu = filtered_data_milieu + + selected_type_lieu = st.selectbox( + "Choisir un type de lieu:", + options=["Aucune sélection"] + + list(filtered_data_lieu["TYPE_LIEU"].unique()), + ) - df_filtre_copy = df_other.copy() + if ( + selected_annee == "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + ): + df_filtered = df_other + elif ( + selected_type_milieu == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + ): + df_filtered = df_other[df_other["ANNEE"] == selected_annee] + elif selected_type_lieu == "Aucune sélection": + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + ] + else: + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + & (df_other["TYPE_LIEU"] == selected_type_lieu) + ] + + # Étape 3: Preparation dataframe pour graphe + # Copie des données pour transfo + df_volume2 = df_filtered.copy() + + # Calcul des indicateurs clés de haut de tableau avant transformation + volume2_total = df_volume2["VOLUME_TOTAL"].sum() + poids2_total = df_volume2["POIDS_TOTAL"].sum() + volume2_total_categorise = df_volume2[cols_volume].sum().sum() + pct_volume2_categorise = volume2_total_categorise / volume2_total + nb_collectes2 = len(df_volume2) + + # estimation du poids categorisée en utilisant pct_volume_categorise + poids2_total_categorise = round(poids2_total * pct_volume2_categorise) + + # Dépivotage du tableau pour avoir une base de données exploitable + df_volume2 = df_volume2.melt( + id_vars=cols_identifiers, + value_vars=cols_volume, + var_name="Matériau", + value_name="Volume", + ) - # Étape 1: Création des filtres - selected_annee_onglet_3 = st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), - key="année_select", - ) - if selected_annee_onglet_3 != 
"Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee_onglet_3] - else: - filtered_data_milieu = df_other + # Nettoyer le nom du Type déchet pour le rendre plus lisible + df_volume2["Matériau"] = ( + df_volume2["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() + ) - selected_type_milieu_onglet_3 = st.selectbox( - "Choisir un type de milieu:", - options=["Aucune sélection"] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), - key="type_milieu_select", - ) + # Grouper par type de matériau pour les visualisations + df_totals_sorted2 = df_volume2.groupby(["Matériau"], as_index=False)[ + "Volume" + ].sum() + df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) + + # Étape 4: Création du Graphique + if not df_filtered.empty: + fig4 = px.pie( + df_totals_sorted2, + values="Volume", + names="Matériau", + title="Répartition des matériaux en volume", + hole=0.4, + color="Matériau", + color_discrete_map=colors_map, + ) + + # Amélioration de l'affichage + fig4.update_traces(textinfo="percent") + fig4.update_layout(autosize=True, legend_title_text="Matériau") + with st.container(border=True): + st.plotly_chart(fig4, use_container_width=True) + else: + st.write("Aucune donnée à afficher pour les filtres sélectionnés.") + + # 2ème option de graphique, à choisir + if not df_filtered.empty: + fig5 = px.treemap( + df_totals_sorted2, + path=["Matériau"], + values="Volume", + title="2ème option : treemap de répartition des matériaux en volume", + color="Matériau", + color_discrete_map=colors_map, + ) + fig5.update_layout( + margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 + ) + fig5.update_traces(textinfo="label+value") + with st.container(border=True): + st.plotly_chart(fig5, use_container_width=True) + else: + st.write("Aucune donnée à afficher pour les filtres sélectionnés.") + + # Onglet 2 : Top Déchets + with tab2: + + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + 
l1_col1, l1_col2, l1_col3 = st.columns(3) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + + volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") + cell1.metric( + "Volume de déchets catégorisés", f"{volume_total_categorise} litres" + ) - if selected_type_milieu_onglet_3 != "Aucune sélection": - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - ] - else: - filtered_data_lieu = filtered_data_milieu + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + poids_total_categorise = f"{poids_total_categorise:,.0f}".replace(",", " ") + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric( + "Poids estimé de déchets categorisés", + f"{poids_total_categorise} kg", + ) - selected_type_lieu_onglet_3 = st.selectbox( - "Choisir un type de lieu:", - options=["Aucune sélection"] + list(filtered_data_lieu["TYPE_LIEU"].unique()), - key="type_lieu_select", - ) + # 3ème métrique : nombre de relevés + cell3 = l1_col3.container(border=True) + # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") + cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + + # Ligne 2 : graphique top déchets + + # Préparation des datas pour l'onglet 2 + df_top = df_nb_dechet.copy() + + df_top_data_releves = df_other.copy() + # Filtration des données pour nb_dechets + df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") + # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement + df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] + # Group by 'categorie', sum 'nb_dechet', et top 10 + df_top10_dechets = ( + df_dechets_groupe.groupby("categorie") + .agg({"nb_dechet": "sum"}) + 
.sort_values(by="nb_dechet", ascending=False) + .head(10) + ) + # recuperation de ces 10 dechets dans une liste pour filtration bubble map + noms_top10_dechets = df_top10_dechets.index.tolist() + # Preparation des datas pour l'onglet 3# ajout de la colonne materiau + df_top10_dechets = df_top10_dechets.merge( + df_dict_corr_dechet_materiau, on="categorie", how="left" + ) + # Preparation de la figure barplot + df_top10_dechets.reset_index(inplace=True) + # Création du graphique en barres avec Plotly Express + fig = px.bar( + df_top10_dechets, + x="categorie", + y="nb_dechet", + labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, + title="Top 10 dechets ramassés", + color="Materiau", + color_discrete_map=colors_map, + category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, + ) + fig.update_layout(yaxis_type="log") + # Amélioration du visuel du graphique + fig.update_traces( + # texttemplate="%{text:.2f}", + textposition="outside" + ) + fig.update_layout( + width=1400, + height=900, + uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=90, + ) + # Suppression de la colonne categorie + del df_top10_dechets["Materiau"] + + # st.markdown( + # """## Quels sont les types de déchets les plus présents sur votre territoire ? 
+ # """ + # ) + # res_aggCategory_filGroup = duckdb.query( + # ( + # "SELECT categorie, sum(nb_dechet) AS total_dechet " + # "FROM df_nb_dechet " + # "WHERE type_regroupement = 'GROUPE' " + # "GROUP BY categorie " + # "HAVING sum(nb_dechet) > 10000 " + # "ORDER BY total_dechet DESC;" + # ) + # ).to_df() - if ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other - elif ( - selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3] - elif selected_type_lieu_onglet_3 == "Aucune sélection": - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ] - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - ] + with st.container(border=True): + col1, col2 = st.columns([3, 1]) + + with col1: + st.plotly_chart(fig, use_container_width=True) + + with col2: + st.write("Nombre ramassé pour chaque déchet") + for index, row in df_top10_dechets.iterrows(): + value = f"{row['nb_dechet']:,.0f}".replace(",", " ") + st.metric(label=row["categorie"], value=value) + + with st.container(): + # Ajout de la selectbox + selected_dechet = st.selectbox( + "Choisir un type de déchet :", noms_top10_dechets, index=0 + ) + + # Filtration sur le dechet top 10 sélectionné + df_top_map = df_top[df_top["categorie"] == selected_dechet] + + # Création du DataFrame de travail pour la carte + df_map_data = pd.merge( + df_top_map, df_top_data_releves, on="ID_RELEVE", how="inner" + ) + + # Création de la carte centrée autour d'une localisation + # Calcul des limites à partir de vos données + min_lat = 
df_map_data["LIEU_COORD_GPS_Y"].min() + max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() + min_lon = df_map_data["LIEU_COORD_GPS_X"].min() + max_lon = df_map_data["LIEU_COORD_GPS_X"].max() + + map_paca = folium.Map( + location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], + zoom_start=8, + tiles="OpenStreetMap", + ) + + # Facteur de normalisation pour ajuster la taille des bulles + normalisation_facteur = 1000 + + for index, row in df_map_data.iterrows(): + # Application de la normalisation + radius = row["nb_dechet"] / normalisation_facteur + + # Application d'une limite minimale pour le rayon si nécessaire + radius = max(radius, 1) + + folium.CircleMarker( + location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), + radius=radius, # Utilisation du rayon ajusté + popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", + color="#3186cc", + fill=True, + fill_color="#3186cc", + ).add_to(map_paca) + + # Affichage de la carte Folium dans Streamlit + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_paca).render(), # , width=1400 + height=1000, + ) + + # Onglet 3 : Secteurs et marques + with tab3: + st.write("") + + # Préparation des données + df_dechet_copy = df_nb_dechet.copy() + + df_filtre_copy = df_other.copy() + + # Étape 1: Création des filtres + selected_annee_onglet_3 = st.selectbox( + "Choisir une année:", + options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), + key="année_select", + ) + if selected_annee_onglet_3 != "Aucune sélection": + filtered_data_milieu = df_other[ + df_other["ANNEE"] == selected_annee_onglet_3 + ] + else: + filtered_data_milieu = df_other + + selected_type_milieu_onglet_3 = st.selectbox( + "Choisir un type de milieu:", + options=["Aucune sélection"] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + key="type_milieu_select", + ) - # Filtration des données pour nb_dechets - df_init = pd.merge(df_dechet_copy, df_filtered, 
on="ID_RELEVE", how="inner") + if selected_type_milieu_onglet_3 != "Aucune sélection": + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + ] + else: + filtered_data_lieu = filtered_data_milieu + + selected_type_lieu_onglet_3 = st.selectbox( + "Choisir un type de lieu:", + options=["Aucune sélection"] + + list(filtered_data_lieu["TYPE_LIEU"].unique()), + key="type_lieu_select", + ) - # Data pour le plot secteur - secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] - top_secteur_df = ( - secteur_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) - ) - top_secteur_df = top_secteur_df.reset_index() - top_secteur_df.columns = ["Secteur", "Nombre de déchets"] + if ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other + elif ( + selected_type_milieu_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3] + elif selected_type_lieu_onglet_3 == "Aucune sélection": + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ] + else: + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + ] + + # Filtration des données pour nb_dechets + df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") + + # Data pour le plot secteur + secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] + top_secteur_df = ( + secteur_df.groupby("categorie")["nb_dechet"] + .sum() + .sort_values(ascending=True) + ) + top_secteur_df = top_secteur_df.reset_index() + top_secteur_df.columns = ["Secteur", 
"Nombre de déchets"] + + # Data pour le plot marque + marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] + top_marque_df = ( + marque_df.groupby("categorie")["nb_dechet"] + .sum() + .sort_values(ascending=True) + ) + top_marque_df = top_marque_df.reset_index() + top_marque_df.columns = ["Marque", "Nombre de déchets"] - # Data pour le plot marque - marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] - top_marque_df = ( - marque_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) - ) - top_marque_df = top_marque_df.reset_index() - top_marque_df.columns = ["Marque", "Nombre de déchets"] + # Chiffres clés + nb_dechet_secteur = secteur_df["nb_dechet"].sum() + nb_secteurs = len(top_secteur_df["Secteur"].unique()) - # Chiffres clés - nb_dechet_secteur = secteur_df["nb_dechet"].sum() - nb_secteurs = len(top_secteur_df["Secteur"].unique()) + nb_dechet_marque = marque_df["nb_dechet"].sum() + nb_marques = len(top_marque_df["Marque"].unique()) - nb_dechet_marque = marque_df["nb_dechet"].sum() - nb_marques = len(top_marque_df["Marque"].unique()) + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2 = st.columns(2) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers - l1_col1, l1_col2 = st.columns(2) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers + nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") + cell1.metric( + "Nombre de déchets catégorisés par secteur", f"{nb_dechet_secteur} dechets" + ) - 
nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") - cell1.metric( - "Nombre de déchets catégorisés par secteur", f"{nb_dechet_secteur} dechets" - ) + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + nb_secteurs = f"{nb_secteurs:,.0f}".replace(",", " ") + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric( + "Nombre de secteurs identifiés lors des collectes", + f"{nb_secteurs} secteurs", + ) - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - nb_secteurs = f"{nb_secteurs:,.0f}".replace(",", " ") - # poids_total = f"{poids_total:,.0f}".replace(",", " ") - cell2.metric( - "Nombre de secteurs identifiés lors des collectes", - f"{nb_secteurs} secteurs", - ) + fig_secteur = px.bar( + top_secteur_df.tail(10), + x="Nombre de déchets", + y="Secteur", + title="Top 10 des secteurs les plus ramassés", + orientation="h", + ) + # add log scale to x axis + fig_secteur.update_layout(xaxis_type="log") + fig_secteur.update_traces( + # texttemplate="%{text:.2f}", + textposition="outside" + ) + fig_secteur.update_layout( + width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" + ) + with st.container(border=True): + st.plotly_chart(fig_secteur, use_container_width=True) - fig_secteur = px.bar( - top_secteur_df.tail(10), - x="Nombre de déchets", - y="Secteur", - title="Top 10 des secteurs les plus ramassés", - orientation="h", - ) - # add log scale to x axis - fig_secteur.update_layout(xaxis_type="log") - fig_secteur.update_traces( - # texttemplate="%{text:.2f}", - textposition="outside" - ) - fig_secteur.update_layout( - width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" - ) - with st.container(border=True): - st.plotly_chart(fig_secteur, use_container_width=True) + l1_col1, l1_col2 = st.columns(2) + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers - l1_col1, l1_col2 = st.columns(2) - cell1 = l1_col1.container(border=True) - # Trick pour séparer les 
milliers + nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") + cell1.metric( + "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" + ) - nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") - cell1.metric( - "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" - ) + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + nb_marques = f"{nb_marques:,.0f}".replace(",", " ") + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric( + "Nombre de marques identifiés lors des collectes", + f"{nb_marques} marques", + ) + fig_marque = px.bar( + top_marque_df.tail(10), + x="Nombre de déchets", + y="Marque", + title="Top 10 des marques les plus ramassées", + orientation="h", + ) + # add log scale to x axis + fig_marque.update_layout(xaxis_type="log") + fig_marque.update_traces( + # texttemplate="%{text:.2f}", + textposition="outside" + ) - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - nb_marques = f"{nb_marques:,.0f}".replace(",", " ") - # poids_total = f"{poids_total:,.0f}".replace(",", " ") - cell2.metric( - "Nombre de marques identifiés lors des collectes", - f"{nb_marques} marques", - ) - fig_marque = px.bar( - top_marque_df.tail(10), - x="Nombre de déchets", - y="Marque", - title="Top 10 des marques les plus ramassées", - orientation="h", - ) - # add log scale to x axis - fig_marque.update_layout(xaxis_type="log") - fig_marque.update_traces( - # texttemplate="%{text:.2f}", - textposition="outside" - ) + fig_marque.update_layout( + width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" + ) - fig_marque.update_layout( - width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" - ) + with st.container(border=True): + st.plotly_chart(fig_marque, use_container_width=True) - with st.container(border=True): - st.plotly_chart(fig_marque, use_container_width=True) +else: + st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 
🚨") diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index af5ff81..aeb0cac 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -4,3 +4,4 @@ streamlit==1.32.2 folium==0.15.1 plotly==5.19.0 streamlit-dynamic-filters==0.1.6 +streamlit-authenticator==0.3.2 From d811f8471adcda768cef6e0ab6d5be98fb3fa222 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 12 Apr 2024 05:36:06 -0400 Subject: [PATCH 018/147] =?UTF-8?q?[kb]=20=F0=9F=99=88=20Add=20streamlit?= =?UTF-8?q?=20credentials?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index b8fb0eb..b269410 100644 --- a/.gitignore +++ b/.gitignore @@ -160,4 +160,7 @@ dmypy.json cython_debug/ # Precommit hooks: ruff cache -.ruff_cache \ No newline at end of file +.ruff_cache + +# Streamlit: credentials +dashboards/app/.credentials.yml From 01824796e3f71868bc79e500d0784092a6149277 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 12 Apr 2024 08:25:31 -0400 Subject: [PATCH 019/147] =?UTF-8?q?[kb]=20=F0=9F=94=92=EF=B8=8F=20Add=20re?= =?UTF-8?q?gistration?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 19 ++++++++++++- dashboards/app/pages/register.py | 47 ++++++++++++++++++++++++++++++++ dashboards/app/requirements.txt | 1 + 3 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 dashboards/app/pages/register.py diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 45f480f..0a5daab 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -4,6 +4,7 @@ import streamlit as st import streamlit_authenticator as stauth import yaml +from st_pages import Page, show_pages from yaml.loader import SafeLoader st.markdown( @@ -25,7 +26,6 @@ config["cookie"]["expiry_days"], config["pre-authorized"], ) - 
authenticator.login( fields={ "Form name": "Connexion", @@ -35,7 +35,17 @@ }, ) + if st.session_state["authentication_status"]: + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", "Hotspots", "🔥"), + Page("pages/structures.py", "Structures", "🔭"), + ], + ) st.markdown("""# À propos""") # Chargement des données géographiques pour le filtre : une seule fois à l'arrivée @@ -119,3 +129,10 @@ def load_df_other() -> pd.DataFrame: st.error("Mauvais identifiants ou mot de passe.") elif st.session_state["authentication_status"] is None: st.warning("Veuillez entrer votre identifiant et mot de passe") + + show_pages( + [ + Page("home.py", "Home", "🏠 "), + Page("pages/register.py", "S'enregistrer", "🚀"), + ], + ) diff --git a/dashboards/app/pages/register.py b/dashboards/app/pages/register.py new file mode 100644 index 0000000..be54cb4 --- /dev/null +++ b/dashboards/app/pages/register.py @@ -0,0 +1,47 @@ +from pathlib import Path +import yaml +from yaml.loader import SafeLoader +import streamlit as st +import streamlit_authenticator as stauth + +st.markdown( + """ +# Bienvenue 👋 +#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
+""", +) + +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) + +try: + ( + email_of_registered_user, + username_of_registered_user, + name_of_registered_user, + ) = authenticator.register_user( + pre_authorization=False, + fields={ + "Form name": "S'enregistrer", + "Email": "Email", + "Username": "Identifiant", + "Password": "Mot de passe", + "Repeat password": "Répeter le mot de passe", + "Register": "S'enregistrer", + }, + ) + if email_of_registered_user: + with open(".credentials.yml", "w") as file: + yaml.dump(config, file, default_flow_style=False) + st.success("Utilisateur enregistré") +except Exception as e: + st.error(e) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index aeb0cac..8bbfc43 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -5,3 +5,4 @@ folium==0.15.1 plotly==5.19.0 streamlit-dynamic-filters==0.1.6 streamlit-authenticator==0.3.2 +st-pages==0.4.5 From f2de3be3a76d90db4fa72869574a71a1d36b7ef8 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Sun, 14 Apr 2024 16:08:27 +0200 Subject: [PATCH 020/147] ajout metrics onglet 1 data , corrections filtres onglets 1 et 3 data --- dashboards/app/pages/data.py | 160 ++++++++++++++++++++++++++++++++--- 1 file changed, 149 insertions(+), 11 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 92dc529..f06e7b6 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -74,6 +74,10 @@ def load_df_other(): else: df_other = load_df_other() +if "df_other_metrics_raw" in st.session_state: + df_other_metrics_raw = st.session_state["df_other_metrics_raw"].copy() +else: + df_other_metrics_raw = load_df_other() # 3 Onglets : Matériaux, Top déchets, 
Filières et marques tab1, tab2, tab3 = st.tabs( @@ -279,14 +283,22 @@ def load_df_other(): st.write("**Détail par milieu, lieu ou année**") # Étape 1: Création des filtres + + df_other_metrics = df_other_metrics_raw.copy() + df_other_metrics = df_other_metrics.fillna(0) + selected_annee = st.selectbox( "Choisir une année:", options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), ) if selected_annee != "Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee] + filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee].copy() + filtered_metrics_milieu = df_other_metrics[ + df_other_metrics["ANNEE"] == selected_annee + ].copy() else: - filtered_data_milieu = df_other + filtered_data_milieu = df_other.copy() + filtered_metrics_milieu = df_other_metrics.copy() selected_type_milieu = st.selectbox( "Choisir un type de milieu:", @@ -298,8 +310,12 @@ def load_df_other(): filtered_data_lieu = filtered_data_milieu[ filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu ] + filtered_metrics_milieu = filtered_metrics_milieu[ + filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu + ] else: - filtered_data_lieu = filtered_data_milieu + filtered_data_lieu = filtered_data_milieu.copy() + filtered_metrics_milieu = df_other_metrics.copy() selected_type_lieu = st.selectbox( "Choisir un type de lieu:", @@ -311,24 +327,102 @@ def load_df_other(): and selected_type_milieu == "Aucune sélection" and selected_type_lieu == "Aucune sélection" ): - df_filtered = df_other + df_filtered = df_other.copy() + df_filtered_metrics = df_other_metrics_raw.copy() elif ( selected_type_milieu == "Aucune sélection" and selected_type_lieu == "Aucune sélection" ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee] - elif selected_type_lieu == "Aucune sélection": + df_filtered = df_other[df_other["ANNEE"] == selected_annee].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["ANNEE"] == selected_annee + ].copy() 
+ elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + and selected_type_milieu != "Aucune sélection" + ): + df_filtered = df_other[df_other["TYPE_MILIEU"] == selected_type_milieu].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["TYPE_MILIEU"] == selected_type_milieu + ].copy() + + elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + ): + df_filtered = df_other[df_other["TYPE_LIEU"] == selected_type_lieu].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["TYPE_LIEU"] == selected_type_lieu + ].copy() + + elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and selected_type_milieu != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["TYPE_LIEU"] == selected_type_lieu) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + ] + elif ( + selected_annee != "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_LIEU"] == selected_type_lieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + ] + elif ( + selected_annee != "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + and selected_type_milieu != "Aucune sélection" + ): df_filtered = df_other[ (df_other["ANNEE"] == selected_annee) & (df_other["TYPE_MILIEU"] == selected_type_milieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_MILIEU"] == 
selected_type_milieu) ] + else: df_filtered = df_other[ (df_other["ANNEE"] == selected_annee) & (df_other["TYPE_MILIEU"] == selected_type_milieu) & (df_other["TYPE_LIEU"] == selected_type_lieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) ] + # Ligne 5 : Metriques filtrés + l5_col1, l5_col2 = st.columns(2) + cell6 = l5_col1.container(border=True) + cell7 = l5_col2.container(border=True) + + poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() + volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() + + volume_total_filtered = f"{volume_total_filtered:,.0f}".replace(",", " ") + cell6.metric("Volume de dechets collectés", f"{volume_total_filtered} litres") + + poids_total_filtered = f"{poids_total_filtered:,.0f}".replace(",", " ") + cell7.metric("Poids total collecté", f"{poids_total_filtered} kg") + # Étape 3: Preparation dataframe pour graphe # Copie des données pour transfo df_volume2 = df_filtered.copy() @@ -578,7 +672,7 @@ def load_df_other(): if selected_annee_onglet_3 != "Aucune sélection": filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee_onglet_3] else: - filtered_data_milieu = df_other + filtered_data_milieu = df_other.copy() selected_type_milieu_onglet_3 = st.selectbox( "Choisir un type de milieu:", @@ -605,23 +699,67 @@ def load_df_other(): and selected_type_milieu_onglet_3 == "Aucune sélection" and selected_type_lieu_onglet_3 == "Aucune sélection" ): - df_filtered = df_other + df_filtered = df_other.copy() elif ( selected_type_milieu_onglet_3 == "Aucune sélection" and selected_type_lieu_onglet_3 == "Aucune sélection" ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3] + df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and 
selected_type_lieu_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + ].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[ + df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 + ].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ].copy() + elif ( + selected_annee_onglet_3 != "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + ].copy() + elif ( + selected_annee_onglet_3 != "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ].copy() + elif selected_type_lieu_onglet_3 == "Aucune sélection": df_filtered = df_other[ (df_other["ANNEE"] == selected_annee_onglet_3) & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ] + ].copy() else: df_filtered = df_other[ (df_other["ANNEE"] == selected_annee_onglet_3) & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - ] + ].copy() # Filtration des données pour nb_dechets df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") From 
6b89f613d82dc6e88d349a35479470fa7c7cd9a2 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Sun, 14 Apr 2024 16:38:00 +0200 Subject: [PATCH 021/147] ajout metric nombre de collecte apres filtration --- dashboards/app/pages/data.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index f06e7b6..b4e103e 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -410,9 +410,10 @@ def load_df_other(): ] # Ligne 5 : Metriques filtrés - l5_col1, l5_col2 = st.columns(2) + l5_col1, l5_col2, l5_col3 = st.columns(3) cell6 = l5_col1.container(border=True) cell7 = l5_col2.container(border=True) + cell8 = l5_col3.container(border=True) poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() @@ -423,6 +424,9 @@ def load_df_other(): poids_total_filtered = f"{poids_total_filtered:,.0f}".replace(",", " ") cell7.metric("Poids total collecté", f"{poids_total_filtered} kg") + nombre_collectes_filtered = f"{len(df_filtered):,.0f}".replace(",", " ") + cell8.metric("Nombre de collectes", f"{nombre_collectes_filtered}") + # Étape 3: Preparation dataframe pour graphe # Copie des données pour transfo df_volume2 = df_filtered.copy() From 24aea8f1ada2fa069c457246a96641487f81fd4a Mon Sep 17 00:00:00 2001 From: "F.Hakimi" Date: Sun, 14 Apr 2024 17:23:54 +0200 Subject: [PATCH 022/147] proposition alternative top dechets + couleurs secteurs et marques --- dashboards/app/pages/data.py | 84 +++++++++++++++++++++++++-- dashboards/{Dockerfile => dockerfile} | 0 2 files changed, 78 insertions(+), 6 deletions(-) rename dashboards/{Dockerfile => dockerfile} (100%) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index b4e103e..742b730 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -30,6 +30,8 @@ st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") # 
Définition d'une fonction pour charger les données du nombre de déchets + + @st.cache_data def load_df_dict_corr_dechet_materiau(): return pd.read_csv( @@ -226,7 +228,7 @@ def load_df_other(): ) # Amélioration du graphique - fig2.update_traces(texttemplate="%{text:.2s}", textposition="outside") + fig2.update_traces(texttemplate="%{text:.2s}", textposition="inside") fig2.update_layout( autosize=True, uniformtext_minsize=8, @@ -563,17 +565,42 @@ def load_df_other(): ) fig.update_layout(yaxis_type="log") # Amélioration du visuel du graphique - fig.update_traces( + + fig.update_layout( + width=1400, + height=900, + uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=90, + ) + + fig_alt = px.bar( + df_top10_dechets, + y="categorie", + x="nb_dechet", + labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, + title="Top 10 dechets ramassés (alternative)", + text="nb_dechet", + color="Materiau", + color_discrete_map=colors_map, + category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, + ) + fig_alt.update_layout(xaxis_type="log") + # Amélioration du visuel du graphique + fig_alt.update_traces( # texttemplate="%{text:.2f}", - textposition="outside" + textposition="inside", + textfont_color="white", + textfont_size=20, ) - fig.update_layout( + fig_alt.update_layout( width=1400, height=900, uniformtext_minsize=8, uniformtext_mode="hide", xaxis_tickangle=90, ) + # Suppression de la colonne categorie del df_top10_dechets["Materiau"] @@ -597,6 +624,7 @@ def load_df_other(): with col1: st.plotly_chart(fig, use_container_width=True) + st.plotly_chart(fig_alt, use_container_width=True) with col2: st.write("Nombre ramassé pour chaque déchet") @@ -812,13 +840,42 @@ def load_df_other(): "Nombre de secteurs identifiés lors des collectes", f"{nb_secteurs} secteurs", ) + colors_map_secteur = { + "AGRICULTURE": "#156644", + "ALIMENTATION": "#F7D156", + "AMEUBLEMENT, DÉCORATION ET ÉQUIPEMENT DE LA MAISON": "#F79D65", + "AQUACULTURE": "#0067C2", + 
"BÂTIMENT, TRAVAUX ET MATÉRIAUX DE CONSTRUCTION": "#FF9900", + "CHASSE ET ARMEMENT": "#23A76F", + "COSMÉTIQUES, HYGIÈNE ET SOINS PERSONNELS": "#BF726B", + "DÉTERGENTS ET PRODUITS D'ENTRETIENS": "#506266", + "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", + "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", + "INDÉTERMINÉ": "#967EA1", + "INFORMATIQUE ET HIGHTECH": "#E351F7", + "JOUETS ET LOISIR": "#A64D79", + "MATÉRIEL ÉLECTRIQUE ET ÉLECTROMÉNAGER": "#AE05C3", + "MÉTALLURGIE": "#EC4773", + "PÊCHE": "#003463", + "PETROCHIMIE": "#0D0D0D", + "PHARMACEUTIQUE/PARAMÉDICAL": "#61BF5E", + "PLASTURGIE": "#05A2AD", + "TABAC": "#E9003F", + "TEXTILE ET HABILLEMENT": "#FA9EE5", + "TRAITEMENT DES EAUX": "#4AA6F7", + "TRANSPORT / AUTOMOBILE": "#6C2775", + "VAISSELLE À USAGE UNIQUE": "#732D3A", + "AUTRES SECTEURS": "#D9C190", + } fig_secteur = px.bar( - top_secteur_df.tail(10), + top_secteur_df.tail(10).sort_values(by="Nombre de déchets", ascending=False), x="Nombre de déchets", y="Secteur", + color="Secteur", title="Top 10 des secteurs les plus ramassés", orientation="h", + color_discrete_map=colors_map_secteur, ) # add log scale to x axis fig_secteur.update_layout(xaxis_type="log") @@ -849,12 +906,27 @@ def load_df_other(): "Nombre de marques identifiés lors des collectes", f"{nb_marques} marques", ) + colors_map_marque = { + "HEINEKEN": "#F7D156", + "COCA-COLA": "#F7D156", + "MARLBORO": "#E9003F", + "CRISTALINE": "#F7D156", + "PHILIP MORRIS": "#E9003F", + "CAPRI-SUN": "#F7D156", + "OASIS": "#F7D156", + "1664": "#F7D156", + "WINSTON": "#E9003F", + "RED BULL": "#F7D156", + } + fig_marque = px.bar( - top_marque_df.tail(10), + top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=False), x="Nombre de déchets", y="Marque", title="Top 10 des marques les plus ramassées", + color="Marque", orientation="h", + color_discrete_map=colors_map_marque, ) # add log scale to x axis fig_marque.update_layout(xaxis_type="log") diff --git a/dashboards/Dockerfile 
b/dashboards/dockerfile similarity index 100% rename from dashboards/Dockerfile rename to dashboards/dockerfile From 5f66eb16d630e09d0315bea153f94d42a1793d22 Mon Sep 17 00:00:00 2001 From: DridrM Date: Sun, 14 Apr 2024 23:42:09 +0200 Subject: [PATCH 023/147] Add params.py into hotspots_function and first attempt to plot a map on the hotspot tab --- dashboards/app/pages/hotspots.py | 81 ++++++++++--------- .../app/pages/hotspots_functions/params.py | 20 +++++ 2 files changed, 62 insertions(+), 39 deletions(-) create mode 100644 dashboards/app/pages/hotspots_functions/params.py diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 02f4fb2..7b22fce 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -4,46 +4,49 @@ import duckdb from hotspots_functions.maps import plot_adopted_waste_spots - -st.markdown( - """# 🔥 Hotspots -*Quelles sont les zones les plus impactées ?* -""" +from hotspots_functions.params import ( + NB_DECHETS_PATH, + DATA_ZDS_PATH, + REGION_GEOJSON_PATH, ) -df_nb_dechet = pd.read_csv( - ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" - ) -) -df_other = pd.read_csv( - ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" - ) -) +################## +# 1/ Import data # +################## -res_aggCategory_filGroup = duckdb.query( - ( - "SELECT categorie, sum(nb_dechet) AS total_dechet " - "FROM df_nb_dechet " - "WHERE type_regroupement = 'GROUPE' " - "GROUP BY categorie " - "HAVING sum(nb_dechet) > 10000 " - "ORDER BY total_dechet DESC;" - ) -).to_df() - -st.altair_chart( - alt.Chart(res_aggCategory_filGroup) - .mark_bar() - .encode( - x=alt.X("categorie", sort = None, title = ""), - y=alt.Y("total_dechet", title = "Total de déchet"), - ), - 
use_container_width = True, -) +# nb dechets : Unused for now +df_nb_dechets = pd.read_csv(NB_DECHETS_PATH) + +# data_zds : main source of data for the hotspots tab +data_zds = pd.read_csv(DATA_ZDS_PATH) + +################## +# 2/ Hotspot tab # +################## + +# Tab title +st.markdown("""# 🔥 Hotspots : **Quelles sont les zones les plus impactées ?**""") + +################################ +# 2.1/ Carte des spots adoptés # +################################ + +# Create 2 columns for 2 filters +columns = st.columns(2) + +# Choice of the region +x1 = data_zds["REGION"].unique() +f1 = columns[0].selectbox("Seléctionnez une région (par défaut votre région) :", x1) +columns[0].write(f1) + +# Choice of the environment +x2 = data_zds["TYPE_MILIEU"].unique() +f2 = columns[0].selectbox("Seléctionnez un environnement :", x2) +columns[0].write(f2) + +# Create the filter dict +filter_dict = {"REGION": f1, "TYPE_MILIEU": f2} + +# Create the map of the adopted spots +plot_adopted_waste_spots(data_zds, filter_dict, region_geojson_path=REGION_GEOJSON_PATH) diff --git a/dashboards/app/pages/hotspots_functions/params.py b/dashboards/app/pages/hotspots_functions/params.py new file mode 100644 index 0000000..9c28ca1 --- /dev/null +++ b/dashboards/app/pages/hotspots_functions/params.py @@ -0,0 +1,20 @@ +# Data path for the df_nb_dechets +NB_DECHETS_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv" +) + +# Data path for the data_zds path +DATA_ZDS_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv" +) + +# Data path for the France regions geojson +REGION_GEOJSON_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/regions" + 
"-avec-outre-mer.geojson" +) From d2203ccc625391fc6240d4855738cc5956e6f8cf Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Mon, 15 Apr 2024 18:03:43 +0200 Subject: [PATCH 024/147] =?UTF-8?q?[tg]=20=F0=9F=94=A7=20Improve=20home=20?= =?UTF-8?q?page=20filters=20and=20remove=20dynamic-filters=20from=20req.tx?= =?UTF-8?q?t?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 55 ++++++++++++++++++++------------- dashboards/app/pages/data.py | 5 ++- dashboards/app/requirements.txt | 3 +- 3 files changed, 36 insertions(+), 27 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 713eed5..f557554 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -40,46 +40,57 @@ def load_df_other() -> pd.DataFrame: } # 1ère étape : sélection du niveau administratif concerné (région, dép...) -# Si déjà saisi précédemment, initialiser le filtre avec la valeur -index_admin = st.session_state.get("niveau_admin", None) +# Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment +# Récupérer les index pour conserver la valeur des filtres au changement de pages +# Filtre niveau administratif +niveau_admin = st.session_state.get("niveau_admin", None) +index_admin = st.session_state.get("index_admin", None) +# Filtre collectivité +collectivite = st.session_state.get("collectivite", None) +index_collec = st.session_state.get("index_collec", None) + +# Initialiser la selectbox avec l'index récupéré select_niveauadmin = st.selectbox( "Niveau administratif : ", niveaux_admin_dict.keys(), - index=1, - key="niveau_admin", + index=index_admin, ) if select_niveauadmin is not None: - # Extraction de la liste depuis le dataframe + # Filtrer la liste des collectivités en fonction du niveau admin liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] liste_collectivites = 
liste_collectivites.sort_values().unique() # 2ème filtre : sélection de la collectivité concernée - index_collec = st.session_state.get("collectivite", None) select_collectivite = st.selectbox( "Collectivité : ", liste_collectivites, - index=2, - key="collectivite", - ) -else: - st.caption( - "Choisissez un niveau administratif pour afficher la liste des collectivités.", + index=index_collec, ) + if st.button("Enregistrer la sélection"): - # Retourner le filtre validé et le nombre de relevés disponibles - filtre_niveau = st.session_state["niveau_admin"] - filtre_collectivite = st.session_state["collectivite"] - st.write(f"Vous avez sélectionné : {filtre_niveau} {filtre_collectivite}.") - - # Enregistrer le DataFrame dans un "session state" pour conserver le filtre dans les onglets - colonne_filtre = niveaux_admin_dict[filtre_niveau] - st.session_state["df_other"] = df_other[ - df_other[colonne_filtre] == filtre_collectivite + # Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, + ) + + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + + # Filtrer et enregistrer le DataFrame dans un "session state" pour les onglets suivants + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + st.session_state["df_other_filtre"] = df_other[ + df_other[colonne_filtre] == select_collectivite ] - nb_releves = len(st.session_state["df_other"]) + nb_releves = len(st.session_state["df_other_filtre"]) st.write( f"{nb_releves} relevés de collecte disponibles \ pour l'analyse sur votre territoire.", diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 742b730..9580b24 100644 
--- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -7,7 +7,6 @@ import plotly.express as px import folium from folium import IFrame -from streamlit_dynamic_filters import DynamicFilters # Page setting : wide layout st.set_page_config( @@ -71,8 +70,8 @@ def load_df_other(): df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() # Appeler le dataframe filtré depuis le session state -if "df_other" in st.session_state: - df_other = st.session_state["df_other"].copy() +if "df_other_filtre" in st.session_state: + df_other = st.session_state["df_other_filtre"].copy() else: df_other = load_df_other() diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index af5ff81..9e9156e 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -2,5 +2,4 @@ pandas==2.0.3 duckdb==0.10.0 streamlit==1.32.2 folium==0.15.1 -plotly==5.19.0 -streamlit-dynamic-filters==0.1.6 +plotly==5.19.0 \ No newline at end of file From 5d9c27318f1d1e81de4a095bee24383e60afc5b7 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 16 Apr 2024 11:52:22 +0200 Subject: [PATCH 025/147] Modifications mineures --- dashboards/app/pages/data.py | 44 +++++++++--------------------------- 1 file changed, 11 insertions(+), 33 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 9580b24..200c6fb 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -1,14 +1,10 @@ import streamlit as st - -# import altair as alt import pandas as pd - -# import duckdb import plotly.express as px import folium from folium import IFrame -# Page setting : wide layout +# Configuration de la page st.set_page_config( layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Data" ) @@ -26,12 +22,16 @@ Visualisez les impacts sur les milieux naturels et secteurs/filières/marques à l’origine de cette pollution """ ) -st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") -# 
Définition d'une fonction pour charger les données du nombre de déchets +if filtre_niveau == "" and filtre_collectivite == "": + st.write( + "Aucune sélection de territoire n'ayant été effectuée les données sont globales" + ) +else: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") -@st.cache_data +# Définition d'une fonction pour charger les données du nombre de déchets@st.cache_data def load_df_dict_corr_dechet_materiau(): return pd.read_csv( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" @@ -61,7 +61,6 @@ def load_df_other(): # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - return df @@ -89,7 +88,6 @@ def load_df_other(): ] ) - milieu_lieu_dict = ( df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] .unique() @@ -603,21 +601,6 @@ def load_df_other(): # Suppression de la colonne categorie del df_top10_dechets["Materiau"] - # st.markdown( - # """## Quels sont les types de déchets les plus présents sur votre territoire ? 
- # """ - # ) - # res_aggCategory_filGroup = duckdb.query( - # ( - # "SELECT categorie, sum(nb_dechet) AS total_dechet " - # "FROM df_nb_dechet " - # "WHERE type_regroupement = 'GROUPE' " - # "GROUP BY categorie " - # "HAVING sum(nb_dechet) > 10000 " - # "ORDER BY total_dechet DESC;" - # ) - # ).to_df() - with st.container(border=True): col1, col2 = st.columns([3, 1]) @@ -691,7 +674,6 @@ def load_df_other(): # Préparation des données df_dechet_copy = df_nb_dechet.copy() - df_filtre_copy = df_other.copy() # Étape 1: Création des filtres @@ -824,8 +806,8 @@ def load_df_other(): # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) # 1ère métrique : volume total de déchets collectés cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers + # Trick pour séparer les milliers nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") cell1.metric( "Nombre de déchets catégorisés par secteur", f"{nb_dechet_secteur} dechets" @@ -878,10 +860,7 @@ def load_df_other(): ) # add log scale to x axis fig_secteur.update_layout(xaxis_type="log") - fig_secteur.update_traces( - # texttemplate="%{text:.2f}", - textposition="outside" - ) + fig_secteur.update_traces(textposition="outside") fig_secteur.update_layout( width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" ) @@ -890,8 +869,8 @@ def load_df_other(): l1_col1, l1_col2 = st.columns(2) cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers + # Trick pour séparer les milliers nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") cell1.metric( "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" @@ -900,7 +879,6 @@ def load_df_other(): # 2ème métrique : poids cell2 = l1_col2.container(border=True) nb_marques = f"{nb_marques:,.0f}".replace(",", " ") - # poids_total = f"{poids_total:,.0f}".replace(",", " ") cell2.metric( "Nombre de marques identifiés lors des collectes", 
f"{nb_marques} marques", From e96d5974ee71c7d35b78208baca511e7022d6145 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Tue, 16 Apr 2024 12:11:20 +0200 Subject: [PATCH 026/147] =?UTF-8?q?[tg]=20remont=C3=A9e=20filtre=20g=C3=A9?= =?UTF-8?q?o,=20correction=20calculs=20metrics,=20am=C3=A9lioration=20grap?= =?UTF-8?q?hs?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 30 ++++++-- dashboards/app/pages/data.py | 142 +++++++++++++++++------------------ 2 files changed, 93 insertions(+), 79 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index f557554..72176f6 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -11,7 +11,8 @@ st.markdown("""# À propos""") -# Chargement des données géographiques pour le filtre : une seule fois à l'arrivée +# Chargement des données et filtre géographique à l'arrivée sur le dashboard +# Table des volumes par matériaux @st.cache_data def load_df_other() -> pd.DataFrame: df = pd.read_csv( @@ -26,9 +27,20 @@ def load_df_other() -> pd.DataFrame: return df +# Table du nb de déchets +@st.cache_data +def load_df_nb_dechet() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv", + ) + + # Appel des fonctions pour charger les données df_other = load_df_other() +df_nb_dechets = load_df_nb_dechet() # Création du filtre par niveau géographique : correspondance labels et variables du dataframe @@ -84,14 +96,22 @@ def load_df_other() -> pd.DataFrame: # Afficher la collectivité sélectionnée st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") - # Filtrer et enregistrer le DataFrame dans un "session state" pour les onglets suivants + # Filtrer et enregistrer le DataFrame dans un session state pour la suite 
colonne_filtre = niveaux_admin_dict[select_niveauadmin] - st.session_state["df_other_filtre"] = df_other[ - df_other[colonne_filtre] == select_collectivite + df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] + st.session_state["df_other_filtre"] = df_other_filtre + + # Filtrer et enregistrer le dataframe nb_dechets dans session.State + # Récuperer la liste des relevés + id_releves = df_other_filtre["ID_RELEVE"].unique() + # Filtrer df_nb_dechets sur la liste des relevés + st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ + df_nb_dechets["ID_RELEVE"].isin(id_releves) ] + # Afficher le nombre de relevés disponibles nb_releves = len(st.session_state["df_other_filtre"]) st.write( - f"{nb_releves} relevés de collecte disponibles \ + f"{nb_releves} relevés de collecte sont disponibles \ pour l'analyse sur votre territoire.", ) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 9580b24..d8f6019 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -28,9 +28,7 @@ ) st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") -# Définition d'une fonction pour charger les données du nombre de déchets - - +# Fonction pour charger le dictionnaire de correspondance déchets-matériaux @st.cache_data def load_df_dict_corr_dechet_materiau(): return pd.read_csv( @@ -40,45 +38,26 @@ def load_df_dict_corr_dechet_materiau(): ) -@st.cache_data -def load_df_nb_dechet(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" - ) - - -# Définition d'une fonction pour charger les autres données -@st.cache_data -def load_df_other(): - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" - ) - - # Ajout des colonnes 
DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - - return df - - # Appel des fonctions pour charger les données -df_nb_dechet = load_df_nb_dechet() df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() -# Appeler le dataframe filtré depuis le session state -if "df_other_filtre" in st.session_state: - df_other = st.session_state["df_other_filtre"].copy() +# Appeler les dataframes volumes et nb_dechets filtré depuis le session state +if ("df_other_filtre" not in st.session_state) or ( + "df_nb_dechets_filtre" not in st.session_state +): + st.write( + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home pour afficher les données. :warning: + """ + ) else: - df_other = load_df_other() + df_other = st.session_state["df_other_filtre"].copy() + df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() + +# Copier le df pour la partie filtrée par milieu/lieu/année +df_other_metrics_raw = df_other.copy() -if "df_other_metrics_raw" in st.session_state: - df_other_metrics_raw = st.session_state["df_other_metrics_raw"].copy() -else: - df_other_metrics_raw = load_df_other() # 3 Onglets : Matériaux, Top déchets, Filières et marques tab1, tab2, tab3 = st.tabs( @@ -163,10 +142,6 @@ def load_df_other(): "Autre": "#F3B900", } - # Ligne 0 : Filtres géographiques - # Popover cell - # with st.popover("Filtres géographiques", help = "Sélectionnez le niveau géographique souhaité pour afficher les indicateurs") : - # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -244,12 +219,7 @@ def load_df_other(): f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total 
collecté." ) - st.divider() - # Ligne 3 : Graphe par milieu de collecte - st.write("**Volume collecté par matériau en fonction du milieu de collecte**") - - # Part de volume collecté par type de milieu # Grouper par année et type de matériau df_typemilieu = df_volume.groupby(["TYPE_MILIEU", "Matériau"], as_index=False)[ @@ -266,12 +236,11 @@ def load_df_other(): y="Volume", color="Matériau", barnorm="percent", - title="Répartition des matériaux en fonction du milieu de collecte", - text_auto=False, + title="Part de chaque matériau en volume selon le milieu de collecte", color_discrete_map=colors_map, ) + fig3.update_layout(bargap=0.2, height=500) - fig3.update_layout(bargap=0.2) fig3.update_layout(yaxis_title="% du volume collecté", xaxis_title=None) # Afficher le graphique @@ -479,7 +448,7 @@ def load_df_other(): else: st.write("Aucune donnée à afficher pour les filtres sélectionnés.") - # 2ème option de graphique, à choisir + # 2ème option de graphique, à choisir if not df_filtered.empty: fig5 = px.treemap( df_totals_sorted2, @@ -502,6 +471,16 @@ def load_df_other(): # Onglet 2 : Top Déchets with tab2: + # Préparation des datas pour l'onglet 2 + df_top = df_nb_dechet.copy() + df_top_data_releves = df_other.copy() + + # Calcul du nombre total de déchets catégorisés sur le territoier + nb_total_dechets = df_top[(df_top["type_regroupement"] == "GROUPE")][ + "nb_dechet" + ].sum() + nb_total_dechets = f"{nb_total_dechets:,.0f}".replace(",", " ") + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) @@ -509,29 +488,33 @@ def load_df_other(): cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers - volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") - cell1.metric("Volume de déchets catégorisés", f"{volume_total_categorise} litres") + # 
volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") + cell1.metric("Nombre de déchets catégorisés", f"{nb_total_dechets} déchets") - # 2ème métrique : poids + # 2ème métrique : équivalent volume catégorisé cell2 = l1_col2.container(border=True) - poids_total_categorise = f"{poids_total_categorise:,.0f}".replace(",", " ") - # poids_total = f"{poids_total:,.0f}".replace(",", " ") + volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") cell2.metric( - "Poids estimé de déchets categorisés", - f"{poids_total_categorise} kg", + "Equivalent en volume ", + f"{volume_total_categorise} litres", ) + # # 2ème métrique : poids + # cell2 = l1_col2.container(border=True) + # poids_total_categorise = f"{poids_total_categorise:,.0f}".replace(",", " ") + # # poids_total = f"{poids_total:,.0f}".replace(",", " ") + # cell2.metric( + # "Poids estimé de déchets categorisés", + # f"{poids_total_categorise} kg", + # ) + # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") - cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") # Ligne 2 : graphique top déchets - # Préparation des datas pour l'onglet 2 - df_top = df_nb_dechet.copy() - - df_top_data_releves = df_other.copy() # Filtration des données pour nb_dechets df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement @@ -561,6 +544,7 @@ def load_df_other(): color="Materiau", color_discrete_map=colors_map, category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, + text_auto=True, ) fig.update_layout(yaxis_type="log") # Amélioration du visuel du graphique @@ -631,6 +615,12 @@ def load_df_other(): value = f"{row['nb_dechet']:,.0f}".replace(",", " ") st.metric(label=row["categorie"], value=value) + st.write("") + 
st.caption( + f"Note : Analyse basée sur les collectes qui ont fait l'objet d'un comptage détaillé par déchet,\ + soit {volume_total_categorise} litres équivalent à {pct_volume_categorise:.0%} du volume collecté\ + sur le territoire." + ) with st.container(): # Ajout de la selectbox selected_dechet = st.selectbox( @@ -681,7 +671,7 @@ def load_df_other(): st_folium = st.components.v1.html st_folium( folium.Figure().add_child(map_paca).render(), # , width=1400 - height=1000, + height=750, ) @@ -697,7 +687,7 @@ def load_df_other(): # Étape 1: Création des filtres selected_annee_onglet_3 = st.selectbox( "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), + options=["Aucune sélection"] + list(df_other["ANNEE"].sort_values().unique()), key="année_select", ) if selected_annee_onglet_3 != "Aucune sélection": @@ -875,15 +865,17 @@ def load_df_other(): title="Top 10 des secteurs les plus ramassés", orientation="h", color_discrete_map=colors_map_secteur, + text_auto=True, ) # add log scale to x axis fig_secteur.update_layout(xaxis_type="log") - fig_secteur.update_traces( - # texttemplate="%{text:.2f}", - textposition="outside" - ) + fig_secteur.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_secteur.update_layout( - width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" + width=800, + height=500, + uniformtext_mode="hide", + showlegend=False, + yaxis_title=None, ) with st.container(border=True): st.plotly_chart(fig_secteur, use_container_width=True) @@ -926,16 +918,18 @@ def load_df_other(): color="Marque", orientation="h", color_discrete_map=colors_map_marque, + text_auto=False, ) # add log scale to x axis fig_marque.update_layout(xaxis_type="log") - fig_marque.update_traces( - # texttemplate="%{text:.2f}", - textposition="outside" - ) + fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_marque.update_layout( - width=800, height=500, uniformtext_minsize=8, 
uniformtext_mode="hide" + width=800, + height=500, + uniformtext_minsize=8, + uniformtext_mode="hide", + yaxis_title=None, ) with st.container(border=True): From 16c0a270d6df8b038a38b521db2b9ab3a4ad12a8 Mon Sep 17 00:00:00 2001 From: DridrM Date: Tue, 16 Apr 2024 19:53:19 +0200 Subject: [PATCH 027/147] Hotspots tab adopted spots map released and functionnal --- dashboards/app/pages/hotspots.py | 228 ++++++++++++++++-- .../app/pages/hotspots_functions/__init__.py | 0 .../app/pages/hotspots_functions/maps.py | 94 -------- .../app/pages/hotspots_functions/params.py | 20 -- .../app/pages/hotspots_functions/utils.py | 25 -- dashboards/app/requirements.txt | 1 + 6 files changed, 208 insertions(+), 160 deletions(-) delete mode 100644 dashboards/app/pages/hotspots_functions/__init__.py delete mode 100644 dashboards/app/pages/hotspots_functions/maps.py delete mode 100644 dashboards/app/pages/hotspots_functions/params.py delete mode 100644 dashboards/app/pages/hotspots_functions/utils.py diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 7b22fce..a64d50b 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -1,15 +1,109 @@ import streamlit as st -import altair as alt + +# import altair as alt # Unused for now import pandas as pd -import duckdb +import geopandas as gpd + +# import duckdb # Unused for now +import folium +from folium.plugins import MarkerCluster + +# To show folium maps on streamlit +from streamlit_folium import folium_static, st_folium + + +###################################### +# 0/ Parameters for the hotspots tab # +###################################### + +# Data path for the df_nb_dechets +NB_DECHETS_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv" +) -from hotspots_functions.maps import plot_adopted_waste_spots -from hotspots_functions.params import ( 
- NB_DECHETS_PATH, - DATA_ZDS_PATH, - REGION_GEOJSON_PATH, +# Data path for the data_zds path +DATA_ZDS_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv" ) +# Data path for the France regions geojson +REGION_GEOJSON_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/regions" + "-avec-outre-mer.geojson" +) + +# Params for the adopted spots map filters +ADOPTED_SPOTS_FILTERS_PARAMS = [ + { + "filter_col": "REGION", + "filter_message": "Seléctionnez une région (par défaut votre région) :", + }, + {"filter_col": "TYPE_MILIEU", "filter_message": "Seléctionnez un milieu :"}, +] + +########################################################################### +# 0 bis/ Fonctions utilitaires : peuvent être utilisées par tout le monde # +########################################################################### + + +def construct_query_string(bound_word=" and ", **params) -> str: + """Construct a query string in the right format for the pandas 'query' + function. The different params are bounded together in the query string with the + bound word given by default. 
If one of the params is 'None', it is not + included in the final query string.""" + + # Instanciate query string + query_string = "" + + # Iterate over the params to construct the query string + for param_key, param in params.items(): + # Construct the param sub string if the param is not 'None' + if param: + query_sub_string = f'{param_key} == "{param}"' + + # Add to the query string + query_string += f"{query_sub_string}{bound_word}" + + # Strip any remaining " and " at the end of the query string + return query_string.strip(bound_word) + + +def scalable_filters( + data_zds: pd.DataFrame, filters_params=ADOPTED_SPOTS_FILTERS_PARAMS +) -> dict: + """Create streamlit select box filters as specified by the filters_params list. + Create the filter dict used to filter the hotspots maps accordingly.""" + + # Instanciate the empty filter dict + filter_dict = dict() + + # Create as many columns as the lenght of the filters_params list + columns = st.columns(len(filters_params)) + + # Iterate over filters_params + for i, filter_params in enumerate(filters_params): + # Set the filter column and the filter message + column, message = filter_params["filter_col"], filter_params["filter_message"] + + # Set the list of choices + x = data_zds[column].unique() + + # Create the streamlit select box + s = columns[i].selectbox(message, x) + + # Show the select box on screen + columns[i].write(s) + + # Fill the filter dict + filter_dict[column] = s + + return filter_dict + ################## # 1/ Import data # @@ -21,6 +115,7 @@ # data_zds : main source of data for the hotspots tab data_zds = pd.read_csv(DATA_ZDS_PATH) + ################## # 2/ Hotspot tab # ################## @@ -28,25 +123,116 @@ # Tab title st.markdown("""# 🔥 Hotspots : **Quelles sont les zones les plus impactées ?**""") + ################################ # 2.1/ Carte des spots adoptés # ################################ -# Create 2 columns for 2 filters -columns = st.columns(2) +# Create the filter dict for the 
adopted spots map and the streamlit filter boxes +filter_dict = scalable_filters(data_zds) -# Choice of the region -x1 = data_zds["REGION"].unique() -f1 = columns[0].selectbox("Seléctionnez une région (par défaut votre région) :", x1) -columns[0].write(f1) +# Create the map of the adopted spots +def plot_adopted_waste_spots( + data_zds: pd.DataFrame, + filter_dict: dict, + region_geojson_path: str, +) -> folium.Map: + """Show a folium innteractive map of adopted spots within a selected region, + filtered by environments of deposit. + Arguments: + - data_zds: The waste dataframe + - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by + """ + # 1/ Create the waste geodataframe # -# Choice of the environment -x2 = data_zds["TYPE_MILIEU"].unique() -f2 = columns[0].selectbox("Seléctionnez un environnement :", x2) -columns[0].write(f2) + # Create a GeoDataFrame for waste points + gdf = gpd.GeoDataFrame( + data_zds, + geometry=gpd.points_from_xy( + data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"] + ), + crs="EPSG:4326", + ) -# Create the filter dict -filter_dict = {"REGION": f1, "TYPE_MILIEU": f2} + # Construct the query string + query_string = construct_query_string(**filter_dict) -# Create the map of the adopted spots -plot_adopted_waste_spots(data_zds, filter_dict, region_geojson_path=REGION_GEOJSON_PATH) + # Filter the geodataframe by region and by environment + gdf_filtered = gdf.query(query_string) + + # 2/ Create the regions geodataframe # + + # Unpack the region name + region = filter_dict["REGION"] + + # Load France regions from a GeoJSON file + regions = gpd.read_file(region_geojson_path) + regions = regions.loc[regions["nom"] == region, :] + + # Filter the region geodataframe for the specified region + selected_region = regions[regions["nom"].str.lower() == region.lower()] + if selected_region.empty: + raise KeyError(f"Region '{region}' not found.") + + # 3/ Initialize folium map # + + # 
Initialize a folium map, centered around the mean location of the waste points + map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] + + # Catch ValueError if the filtered geodataframe contain no rows + try: + m = folium.Map( + location=map_center, zoom_start=5 + ) # Adjust zoom_start as needed for the best initial view + + # Return None if ValueError + except ValueError as e: + st.markdown( + "Il n'y a pas de hotspots pour les valeurs de filtres selectionnés !" + ) + return + + # 4/ Add the markers # + + # Use MarkerCluster to manage markers if dealing with a large number of points + marker_cluster = MarkerCluster().add_to(m) + + # Add each waste point as a marker on the folium map + for _, row in gdf_filtered.iterrows(): + # Define the marker color: green for adopted spots, red for others + marker_color = "darkgreen" if row["SPOT_A1S"] else "red" + # Define the icon: check-circle for adopted, info-sign for others + icon_type = "check-circle" if row["SPOT_A1S"] else "info-sign" + + folium.Marker( + location=[row.geometry.y, row.geometry.x], + popup=f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", + icon=folium.Icon(color=marker_color, icon=icon_type, prefix="fa"), + ).add_to(marker_cluster) + + # 5/ Add the region boundary # + + # Add the region boundary to the map for context + folium.GeoJson( + selected_region, + name="Region Boundary", + style_function=lambda feature: { + "weight": 2, + "fillOpacity": 0.1, + }, + ).add_to(m) + + return m + + +# Construct the map +m = plot_adopted_waste_spots(data_zds, filter_dict, REGION_GEOJSON_PATH) + +# Show the adopted spots map on the streamlit tab +if m: + folium_static(m) + + +######################################################## +# 2.1/ Carte densité de déchets sur les zones étudiées # +######################################################## diff --git a/dashboards/app/pages/hotspots_functions/__init__.py b/dashboards/app/pages/hotspots_functions/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/dashboards/app/pages/hotspots_functions/maps.py b/dashboards/app/pages/hotspots_functions/maps.py deleted file mode 100644 index e3a5e35..0000000 --- a/dashboards/app/pages/hotspots_functions/maps.py +++ /dev/null @@ -1,94 +0,0 @@ -import pandas as pd -import geopandas as gpd -import folium -from folium.plugins import MarkerCluster - -from utils import construct_query_string - -def plot_adopted_waste_spots(data_zds: pd.DataFrame, - filter_dict: dict, - region_geojson_path: str, - ) -> folium.Map: - """Show a folium innteractive map of adopted spots within a selected region, - filtered by environments of deposit. 
- Arguments: - - data_zds: The waste dataframe - - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by - """ - #################################### - # 1/ Create the waste geodataframe # - #################################### - - # Create a GeoDataFrame for waste points - gdf = gpd.GeoDataFrame( - data_zds, - geometry = gpd.points_from_xy(data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"]), - crs = "EPSG:4326" - ) - - # Construct the query string - query_string = construct_query_string(**filter_dict) - - # Filter the geodataframe by region and by environment - gdf_filtered = gdf.query(query_string) - - ###################################### - # 2/ Create the regions geodataframe # - ###################################### - - # Unpack the region name - region = filter_dict["REGION"] - - # Load France regions from a GeoJSON file - regions = gpd.read_file(region_geojson_path) - regions = regions.loc[regions["nom"] == region, :] - - # Filter the region geodataframe for the specified region - selected_region = regions[regions["nom"].str.lower() == region.lower()] - if selected_region.empty: - raise KeyError(f"Region '{region}' not found.") - - ############################ - # 3/ Initialize folium map # - ############################ - - # Initialize a folium map, centered around the mean location of the waste points - map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] - m = folium.Map(location = map_center, zoom_start = 5) # Adjust zoom_start as needed for the best initial view - - ###################### - # 4/ Add the markers # - ###################### - - # Use MarkerCluster to manage markers if dealing with a large number of points - marker_cluster = MarkerCluster().add_to(m) - - # Add each waste point as a marker on the folium map - for _, row in gdf_filtered.iterrows(): - # Define the marker color: green for adopted spots, red for others - marker_color = 'darkgreen' if 
row['SPOT_A1S'] else 'red' - # Define the icon: check-circle for adopted, info-sign for others - icon_type = 'check-circle' if row['SPOT_A1S'] else 'info-sign' - - folium.Marker( - location = [row.geometry.y, row.geometry.x], - popup = f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", - icon = folium.Icon(color = marker_color, icon = icon_type, prefix = 'fa') - ).add_to(marker_cluster) - - ############################## - # 5/ Add the region boundary # - ############################## - - # Add the region boundary to the map for context - folium.GeoJson( - selected_region, - name = "Region Boundary", - style_function = lambda feature: { - 'weight': 2, - 'fillOpacity': 0.1, - } - ).add_to(m) - - # Return the map - return m diff --git a/dashboards/app/pages/hotspots_functions/params.py b/dashboards/app/pages/hotspots_functions/params.py deleted file mode 100644 index 9c28ca1..0000000 --- a/dashboards/app/pages/hotspots_functions/params.py +++ /dev/null @@ -1,20 +0,0 @@ -# Data path for the df_nb_dechets -NB_DECHETS_PATH = ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" -) - -# Data path for the data_zds path -DATA_ZDS_PATH = ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" -) - -# Data path for the France regions geojson -REGION_GEOJSON_PATH = ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/regions" - "-avec-outre-mer.geojson" -) diff --git a/dashboards/app/pages/hotspots_functions/utils.py b/dashboards/app/pages/hotspots_functions/utils.py deleted file mode 100644 index b82de7a..0000000 --- a/dashboards/app/pages/hotspots_functions/utils.py +++ /dev/null @@ -1,25 +0,0 @@ -# Imports here - - -def construct_query_string(bound_word = " and ", - **params - ) -> str: - """Construct a query string in the right format for the pandas 'query' - function. The different params are bounded together in the query string with the - bound word given by default. 
If one of the params is 'None', it is not - included in the final query string.""" - - # Instanciate query string - query_string = "" - - # Iterate over the params to construct the query string - for param_key, param in params.items(): - # Construct the param sub string if the param is not 'None' - if param: - query_sub_string = f"{param_key} == '{param}'" - - # Add to the query string - query_string += f"{query_sub_string}{bound_word}" - - # Strip any remaining " and " at the end of the query string - return query_string.strip(bound_word) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index a69bed9..9b2e557 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -3,3 +3,4 @@ geopandas==0.14.3 folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 +streamlit-folium From 4760405ef4b6df3b40c30bb31430537326a0ccb0 Mon Sep 17 00:00:00 2001 From: Mendi33 Date: Wed, 17 Apr 2024 13:43:31 +0000 Subject: [PATCH 028/147] =?UTF-8?q?Copier=20coller=20du=20home.py=20(depui?= =?UTF-8?q?s=20l'onglet=20DATA)=20pour=20avoir=20les=20filtres=20administr?= =?UTF-8?q?atif=20et=20collectivit=C3=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ajout dans requierements.txt de folium 0.15.1 plotly 5.19.0 actions.py : 2 onglet evenement a venir (Mehdi) Evenement (Valerie) --- dashboards/app/home.py | 88 ++++++- dashboards/app/pages/actions.py | 437 +++++++++++++++++++++++++++++--- dashboards/app/requirements.txt | 2 + 3 files changed, 488 insertions(+), 39 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 3fd4b7b..f557554 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,3 +1,4 @@ +import pandas as pd import streamlit as st st.markdown( @@ -8,4 +9,89 @@ ) st.markdown("""# À propos""") -st.image("media/ZDS-logo.png") + + +# Chargement des données géographiques pour le filtre : une seule fois à l'arrivée +@st.cache_data +def load_df_other() -> 
pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + +# Appel des fonctions pour charger les données + +df_other = load_df_other() + + +# Création du filtre par niveau géographique : correspondance labels et variables du dataframe +niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", +} + +# 1ère étape : sélection du niveau administratif concerné (région, dép...) +# Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment +# Récupérer les index pour conserver la valeur des filtres au changement de pages +# Filtre niveau administratif +niveau_admin = st.session_state.get("niveau_admin", None) +index_admin = st.session_state.get("index_admin", None) +# Filtre collectivité +collectivite = st.session_state.get("collectivite", None) +index_collec = st.session_state.get("index_collec", None) + +# Initialiser la selectbox avec l'index récupéré +select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=index_admin, +) + +if select_niveauadmin is not None: + # Filtrer la liste des collectivités en fonction du niveau admin + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + # 2ème filtre : sélection de la collectivité concernée + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=index_collec, + ) + + +if st.button("Enregistrer la sélection"): + # 
Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, + ) + + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + + # Filtrer et enregistrer le DataFrame dans un "session state" pour les onglets suivants + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + st.session_state["df_other_filtre"] = df_other[ + df_other[colonne_filtre] == select_collectivite + ] + + nb_releves = len(st.session_state["df_other_filtre"]) + st.write( + f"{nb_releves} relevés de collecte disponibles \ + pour l'analyse sur votre territoire.", + ) diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index 208b092..249fb63 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -1,49 +1,410 @@ -import streamlit as st -import altair as alt import pandas as pd -import duckdb +from datetime import datetime, timedelta + +import plotly.express as px + +import streamlit as st +import folium + +# from folium import IFrame +# Page setting : wide layout +st.set_page_config( + layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Actions" +) + +# Session state +session_state = st.session_state + +# Récupérer les filtres géographiques s'ils ont été fixés +filtre_niveau = st.session_state.get("niveau_admin", "") +filtre_collectivite = st.session_state.get("collectivite", "") + +# Titre de l'onglet st.markdown( - """# 👊 Actions -*Quels sont les actions mises en place par les acteurs ?* + """# 🔎 Actions +Quels sont les actions mises en place par les acteurs ? 
""" ) -df_nb_dechet = pd.read_csv( - ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" - ) +# 2 Onglets : Evènements, Evènements à venir +tab1, tab2 = st.tabs( + [ + "Evènements", + "Evènements à venir", + ] ) -df_other = pd.read_csv( - ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" +# Onglet 1 : Evènements +with tab1: + if filtre_niveau == "" and filtre_collectivite == "": + st.write( + "Aucune sélection de territoire n'ayant été effectuée les données sont globales" + ) + else: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") + + # Définition d'une fonction pour charger les données du nombre de déchets + @st.cache_data + def load_df_dict_corr_dechet_materiau(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" + "chet_groupe_materiau.csv" + ) + + @st.cache_data + def load_df_nb_dechet(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv" + ) + + # Définition d'une fonction pour charger les autres données + @st.cache_data + def load_df_other(): + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv" + ) + + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + + return df + + # Appel 
des fonctions pour charger les données + df_nb_dechet = load_df_nb_dechet() + # df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() + + # Appeler le dataframe filtré depuis le session state + if "df_other_filtre" in st.session_state: + df_other = st.session_state["df_other_filtre"].copy() + else: + df_other = load_df_other() + + if "df_other_metrics_raw" in st.session_state: + df_other_metrics_raw = st.session_state["df_other_metrics_raw"].copy() + else: + df_other_metrics_raw = load_df_other() + + #################### + # @Valerie : J'ai comment pour éviter les errreur + # Les DF sont chargés au dessus comme dans l'onglet DATA + # Je n'ai pas trouvé de référence à 'df_nb_dechets_filtre' dans l'onglet DATA + #################### + + # Appeler les dataframes volumes et nb_dechets filtré depuis le session state + # if ("df_other_filtre" not in st.session_state) or ( + # "df_nb_dechets_filtre" not in st.session_state + # ): + # st.write( + # """ + # ### :warning: Merci de sélectionner une collectivité\ + # dans l'onglet Home pour afficher les données. 
:warning: + # """ + # ) + + # df_nb_dechet = pd.read_csv( + # ( + # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + # "sation/data/data_releve_nb_dechet.csv" + # ) + # ) + + # df_other = pd.read_csv( + # ( + # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + # "sation/data/data_zds_enriched.csv" + # ) + # ) + + # else: + # df_other = st.session_state["df_other_filtre"].copy() + # df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() + + # Copier le df pour la partie filtrée par milieu/lieu/année + df_other_metrics_raw = df_other.copy() + + annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) + + # Transformation du dataframe pour les graphiques + # Variables à conserver en ligne + cols_identifiers = [ + "ANNEE", + "TYPE_MILIEU", + "INSEE_COM", + "DEP", + "REG", + "EPCI", + "BV2022", + ] + + # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau + cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] + + # Copie des données pour transfo + df_volume = df_other.copy() + + # Calcul des indicateurs clés de haut de tableau avant transformation + volume_total = df_volume["VOLUME_TOTAL"].sum() + poids_total = df_volume["POIDS_TOTAL"].sum() + volume_total_categorise = df_volume[cols_volume].sum().sum() + pct_volume_categorise = volume_total_categorise / volume_total + nb_collectes = len(df_volume) + + # estimation du poids categorisée en utilisant pct_volume_categorise + poids_total_categorise = round(poids_total * pct_volume_categorise) + + # Dépivotage du tableau pour avoir une base de données exploitable + df_volume = df_volume.melt( + id_vars=cols_identifiers, + value_vars=cols_volume, + var_name="Matériau", + value_name="Volume", ) -) -res_aggCategory_filGroup = duckdb.query( - ( - "SELECT categorie, 
sum(nb_dechet) AS total_dechet " - "FROM df_nb_dechet " - "WHERE type_regroupement = 'GROUPE' " - "GROUP BY categorie " - "HAVING sum(nb_dechet) > 10000 " - "ORDER BY total_dechet DESC;" + # Nettoyer le nom du Type déchet pour le rendre plus lisible + df_volume["Matériau"] = ( + df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() + ) + + # Grouper par type de matériau pour les visualisations + df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)["Volume"].sum() + df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) + + # Charte graphique MERTERRE : + colors_map = { + "Textile": "#C384B1", + "Papier": "#CAA674", + "Metal": "#A0A0A0", + "Verre": "#3DCE89", + "Autre": "#F3B900", + "Plastique": "#48BEF0", + "Caoutchouc": "#364E74", + "Bois": "#673C11", + "Papier/Carton": "#CAA674", + "Métal": "#A0A0A0", + "Verre/Céramique": "#3DCE89", + "Autre": "#F3B900", + } + + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + + # 1ère métrique : nombre de relevés + cell1 = l1_col1.container(border=True) + nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") + cell1.metric("Nombre de collectes réalisées", f"{nb_collectes}") + + # 2ème métrique : Nombre de Participants + # cell2 = l1_col2.container(border=True) + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + + # cell2.metric("Poids total collecté", f"{poids_total} kg") + + # 3ème métrique : Nombre de Structures + # cell3 = l1_col3.container(border=True) + # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") + # cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + + # Ligne 2 : 2 cellules avec les indicateurs clés en haut de page + l2_col1, l2_col2 = st.columns(2) + + # 1ère métrique : volume total de déchets collectés + cell4 = l2_col1.container(border=True) + # Trick pour séparer les milliers + volume_total = f"{volume_total:,.0f}".replace(",", " ") + cell4.metric("Volume de déchets 
collectés", f"{volume_total} litres") + + # 2ème métrique : poids + cell5 = l2_col2.container(border=True) + poids_total = f"{poids_total:,.0f}".replace(",", " ") + + cell5.metric("Poids total collecté", f"{poids_total} kg") + + # Ligne 3 : 2 graphiques en ligne : carte relevés et bar chart matériaux + l3_col1, l3_col2 = st.columns(2) + cell6 = l3_col1.container(border=True) + cell7 = l3_col2.container(border=True) + + # with cell6: + # Création de la carte + + with cell7: + # Création du graphique en barres avec Plotly Express + fig2 = px.bar( + df_totals_sorted, + x="Matériau", + y="Volume", + text="Volume", + title="Volume total par materiau (en litres)", + color="Matériau", + color_discrete_map=colors_map, + ) + + # Amélioration du graphique + fig2.update_traces(texttemplate="%{text:.2s}", textposition="inside") + fig2.update_layout( + autosize=True, + uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=90, + showlegend=False, + ) + + # Affichage du graphique + st.plotly_chart(fig2, use_container_width=True) + + st.write("") + st.caption( + f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." 
+ ) + + # Ligne 4 : 2 graphiques en ligne : bar chart milieux et bar chart types déchets + l4_col1, l4_col2 = st.columns(2) + cell8 = l4_col1.container(border=True) + cell9 = l4_col2.container(border=True) + + # with cell8: + # # Création du graphique en barres avec Plotly Express + # fig3 = px.bar( + # df_volume, + # x="TYPE_MILIEU", + # y="nb_collectes", + # text="Nombre de Collectes", + # title="Nombre de Collectes par Types de Milieux", + # color="#48BEF0", + # color_discrete_map=colors_map, + # orientation='h', + # ) + + # Amélioration du graphique + # fig3.update_traces(texttemplate="%{text:.2s}", textposition="inside") + # fig3.update_layout( + # autosize=True, + # uniformtext_minsize=8, + # uniformtext_mode="hide", + # xaxis_tickangle=90, + # showlegend=False, + # ) + + # Affichage du graphique + # st.plotly_chart(fig3, use_container_width=True) + + # with cell9: + # # Création du graphique en barres avec Plotly Express + # fig4 = px.bar( + # df_volume, + # x="TYPE_DECHET", + # y="nb_collectes", + # text="Nombre de Collectes", + # title="Nombre de Collectes par Types de Déchets", + # color="#48BEF0", + # color_discrete_map=colors_map, + # ) + + # # Amélioration du graphique + # fig4.update_traces(texttemplate="%{text:.2s}", textposition="inside") + # fig4.update_layout( + # autosize=True, + # uniformtext_minsize=8, + # uniformtext_mode="hide", + # xaxis_tickangle=90, + # showlegend=False, + # ) + + # # Affichage du graphique + # st.plotly_chart(fig4, use_container_width=True) + + # Ligne 5 : 2 graphiques en ligne : line chart volume + nb collectes et Pie niveau de caractérisation + l5_col1, l5_col2 = st.columns(2) + cell10 = l5_col1.container(border=True) + cell11 = l5_col2.container(border=True) + + # with cell10: + # Création du graphique en barres volume + ligne nb de relevées avec Plotly Express + + # with cell11: + # Création du graphique en donut avec Plotly Express + + +# onglet Evenements a venir +with tab2: + st.write(f"Votre territoire : Pays - 
France") + + # Définition d'une fonction pour charger les evenements à venir + @st.cache_data + def load_df_events_clean() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/export_events_cleaned.csv" + ) + + # Appel des fonctions pour charger les données + df_events = load_df_events_clean() + + df_events.DATE = pd.to_datetime(df_events.DATE) + + # Filtrer les événements à venir + df_events_a_venir = df_events[df_events.DATE > (datetime.now() - timedelta(days=5))] + + # Trie les events par date + df_events_a_venir.sort_values(by="DATE", inplace=True) + + # Coord approximatives du centre de la France + coord_centre_france = [46.603354, 1.888334] + + # Code couleurs de ZDS + color_ZDS_bleu = "#003463" + color_ZDS_rouge = "#e9003f" + + # Créer la carte + map_events = folium.Map( + location=coord_centre_france, + zoom_start=6, + ) + + # Ajouter des marqueurs pour chaque événement à venir sur la carte + for idx, row in df_events_a_venir.iterrows(): + folium.Marker( + location=[row.COORD_GPS_Y, row.COORD_GPS_X], + popup=folium.Popup(row.NOM_EVENEMENT, lazy=False), + # tooltip=row.NOM_EVENEMENT, + # icon=folium.Icon(icon_color=color_ZDS_bleu) + ).add_to(map_events) + + # Afficher la liste des événements à venir avec la date affichée avant le nom + st.subheader("Actions à venir :") + + with st.container(height=500, border=False): + for idx, row in df_events_a_venir.iterrows(): + with st.container(border=True): + # Bloc contenant la date + date_block = f"

{row.DATE.day}
{row.DATE.strftime('%b')}
" + # Bloc contenant le nom de l'événement + event_block = ( + f"
{row.NOM_EVENEMENT}
" + ) + # Bloc contenant le type d'événement et le nom de la structure + type_structure_block = f"{row.TYPE_EVENEMENT} | {row.NOM_STRUCTURE}" + + # Ajout de chaque événement dans la liste + st.write( + f"
{date_block}
{event_block}{type_structure_block}
", + unsafe_allow_html=True, + ) + + # Afficher la carte avec Streamlit + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_events).render(), + width=800, + height=800, ) -).to_df() - -# st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") - -st.altair_chart( - alt.Chart(res_aggCategory_filGroup) - .mark_bar() - .encode( - x=alt.X("categorie", sort=None, title=""), - y=alt.Y("total_dechet", title="Total de déchet"), - ), - use_container_width=True, -) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 28dbd01..9e9156e 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -1,3 +1,5 @@ pandas==2.0.3 duckdb==0.10.0 streamlit==1.32.2 +folium==0.15.1 +plotly==5.19.0 \ No newline at end of file From e3285569d64a43a6bd53d213d3d1227f991b2223 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Wed, 17 Apr 2024 17:46:01 +0200 Subject: [PATCH 029/147] =?UTF-8?q?ajout=20filtre=20ann=C3=A9e=20positionn?= =?UTF-8?q?ement=20metrics,=20donut=20chart=20niveaux=20de=20caracterisati?= =?UTF-8?q?on=20et=20barplot=20types=20de=20milieux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/actions.py | 323 +++++++++++--------------------- 1 file changed, 109 insertions(+), 214 deletions(-) diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index 249fb63..acdb778 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -1,13 +1,9 @@ import pandas as pd from datetime import datetime, timedelta - import plotly.express as px - import streamlit as st import folium -# from folium import IFrame - # Page setting : wide layout st.set_page_config( layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Actions" @@ -20,6 +16,51 @@ filtre_niveau = st.session_state.get("niveau_admin", "") filtre_collectivite = st.session_state.get("collectivite", "") +# 
Définition d'une fonction pour charger les données du nombre de déchets +@st.cache_data +def load_df_dict_corr_dechet_materiau(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" + "chet_groupe_materiau.csv" + ) + + +@st.cache_data +def load_df_nb_dechet(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv" + ) + + +# Définition d'une fonction pour charger les autres données +@st.cache_data +def load_df_other(): + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv" + ) + + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + + return df + + +# Appel des fonctions pour charger les données +df_nb_dechet = load_df_nb_dechet() +# df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() + +# Appeler le dataframe filtré depuis le session state +if "df_other_filtre" in st.session_state: + df_other = st.session_state["df_other_filtre"].copy() +else: + df_other = load_df_other() + # Titre de l'onglet st.markdown( """# 🔎 Actions @@ -44,53 +85,6 @@ else: st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") - # Définition d'une fonction pour charger les données du nombre de déchets - @st.cache_data - def load_df_dict_corr_dechet_materiau(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" - "chet_groupe_materiau.csv" - ) - - 
@st.cache_data - def load_df_nb_dechet(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" - ) - - # Définition d'une fonction pour charger les autres données - @st.cache_data - def load_df_other(): - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" - ) - - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - - return df - - # Appel des fonctions pour charger les données - df_nb_dechet = load_df_nb_dechet() - # df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() - - # Appeler le dataframe filtré depuis le session state - if "df_other_filtre" in st.session_state: - df_other = st.session_state["df_other_filtre"].copy() - else: - df_other = load_df_other() - - if "df_other_metrics_raw" in st.session_state: - df_other_metrics_raw = st.session_state["df_other_metrics_raw"].copy() - else: - df_other_metrics_raw = load_df_other() - #################### # @Valerie : J'ai comment pour éviter les errreur # Les DF sont chargés au dessus comme dans l'onglet DATA @@ -133,66 +127,25 @@ def load_df_other(): annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - # Transformation du dataframe pour les graphiques - # Variables à conserver en ligne - cols_identifiers = [ - "ANNEE", - "TYPE_MILIEU", - "INSEE_COM", - "DEP", - "REG", - "EPCI", - "BV2022", - ] + # Filtre par année: + options = ["Aucune sélection"] + list(df_other["ANNEE"].unique()) + annee_choisie = st.selectbox("Choisissez l'année:", options, index=0) - # variables à décroiser de 
la base de données correspondant aux Volume global de chaque matériau - cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] + if annee_choisie == "Aucune sélection": + df_other_filtre = df_other.copy() + + if annee_choisie != "Aucune sélection": + df_other_filtre = df_other[df_other["ANNEE"] == annee_choisie].copy() # Copie des données pour transfo - df_volume = df_other.copy() + df_events = df_other_filtre.copy() # Calcul des indicateurs clés de haut de tableau avant transformation - volume_total = df_volume["VOLUME_TOTAL"].sum() - poids_total = df_volume["POIDS_TOTAL"].sum() - volume_total_categorise = df_volume[cols_volume].sum().sum() - pct_volume_categorise = volume_total_categorise / volume_total - nb_collectes = len(df_volume) - - # estimation du poids categorisée en utilisant pct_volume_categorise - poids_total_categorise = round(poids_total * pct_volume_categorise) - - # Dépivotage du tableau pour avoir une base de données exploitable - df_volume = df_volume.melt( - id_vars=cols_identifiers, - value_vars=cols_volume, - var_name="Matériau", - value_name="Volume", - ) - - # Nettoyer le nom du Type déchet pour le rendre plus lisible - df_volume["Matériau"] = ( - df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() - ) - - # Grouper par type de matériau pour les visualisations - df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)["Volume"].sum() - df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) - - # Charte graphique MERTERRE : - colors_map = { - "Textile": "#C384B1", - "Papier": "#CAA674", - "Metal": "#A0A0A0", - "Verre": "#3DCE89", - "Autre": "#F3B900", - "Plastique": "#48BEF0", - "Caoutchouc": "#364E74", - "Bois": "#673C11", - "Papier/Carton": "#CAA674", - "Métal": "#A0A0A0", - "Verre/Céramique": "#3DCE89", - "Autre": "#F3B900", - } + volume_total = df_events["VOLUME_TOTAL"].sum() + poids_total = df_events["POIDS_TOTAL"].sum() + nombre_participants = 
df_events["NB_PARTICIPANTS"].sum() + nb_collectes = len(df_events) + nombre_structures = df_events["ID_STRUCTURE"].nunique() # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -203,136 +156,78 @@ def load_df_other(): cell1.metric("Nombre de collectes réalisées", f"{nb_collectes}") # 2ème métrique : Nombre de Participants - # cell2 = l1_col2.container(border=True) - # poids_total = f"{poids_total:,.0f}".replace(",", " ") - - # cell2.metric("Poids total collecté", f"{poids_total} kg") + cell2 = l1_col2.container(border=True) + nombre_participants = f"{nombre_participants:,.0f}".replace(",", " ") + cell2.metric("Nombre de participants", f"{nombre_participants}") # 3ème métrique : Nombre de Structures - # cell3 = l1_col3.container(border=True) - # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") - # cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + cell3 = l1_col3.container(border=True) + nombre_structures = f"{nombre_structures:,.0f}".replace(",", " ") + cell3.metric("Nombre de structures", f"{nombre_structures}") - # Ligne 2 : 2 cellules avec les indicateurs clés en haut de page - l2_col1, l2_col2 = st.columns(2) + # Ligne 2 : Carte - # 1ère métrique : volume total de déchets collectés - cell4 = l2_col1.container(border=True) - # Trick pour séparer les milliers - volume_total = f"{volume_total:,.0f}".replace(",", " ") - cell4.metric("Volume de déchets collectés", f"{volume_total} litres") + # Ligne 3 : 1 graphique donut chart et un graphique barplot horizontal nombre de relevés par types de milieux + # préparation du dataframe et figure niveaux de caracterisation - # 2ème métrique : poids - cell5 = l2_col2.container(border=True) - poids_total = f"{poids_total:,.0f}".replace(",", " ") + df_carac = df_other_filtre.copy() + df_carac_counts = df_carac["NIVEAU_CARAC"].value_counts().reset_index() + df_carac_counts.columns = ["NIVEAU_CARAC", "counts"] - cell5.metric("Poids total collecté", 
f"{poids_total} kg") + fig1_actions = px.pie( + df_carac_counts, + values="counts", + names="NIVEAU_CARAC", + title="Répartition des niveaux de caractérisation", + hole=0.5, + ) + fig1_actions.update_traces(textposition="inside", textinfo="percent+label") - # Ligne 3 : 2 graphiques en ligne : carte relevés et bar chart matériaux - l3_col1, l3_col2 = st.columns(2) - cell6 = l3_col1.container(border=True) - cell7 = l3_col2.container(border=True) + # préparation du dataframe et figure releves types de milieux - # with cell6: - # Création de la carte - - with cell7: - # Création du graphique en barres avec Plotly Express - fig2 = px.bar( - df_totals_sorted, - x="Matériau", - y="Volume", - text="Volume", - title="Volume total par materiau (en litres)", - color="Matériau", - color_discrete_map=colors_map, - ) + df_milieux = df_other_filtre.copy() + df_milieux_counts = df_milieux["TYPE_MILIEU"].value_counts().reset_index() + df_milieux_counts.columns = ["TYPE_MILIEU", "counts"] + df_milieux_counts_sorted = df_milieux_counts.sort_values( + by="counts", ascending=True + ) - # Amélioration du graphique - fig2.update_traces(texttemplate="%{text:.2s}", textposition="inside") - fig2.update_layout( - autosize=True, - uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=90, - showlegend=False, - ) + fig2_actions = px.bar( + df_milieux_counts_sorted, + y="TYPE_MILIEU", + x="counts", + title="Nombre de relevés par types de milieux", + text="counts", + orientation="h", + ) - # Affichage du graphique - st.plotly_chart(fig2, use_container_width=True) + l3_col1, l3_col2 = st.columns(2) + cell4 = l3_col1.container(border=True) + cell5 = l3_col2.container(border=True) - st.write("") - st.caption( - f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." 
- ) + # Affichage donut + with cell4: + st.plotly_chart(fig1_actions, use_container_width=True) + + # Affichage barplot + with cell5: + st.plotly_chart(fig2_actions, use_container_width=True) + + # Ligne 3 : 2 graphiques en ligne : carte relevés et bar chart matériaux + l3_col1, l3_col2 = st.columns(2) + cell6 = l3_col1.container(border=True) + cell7 = l3_col2.container(border=True) # Ligne 4 : 2 graphiques en ligne : bar chart milieux et bar chart types déchets l4_col1, l4_col2 = st.columns(2) cell8 = l4_col1.container(border=True) cell9 = l4_col2.container(border=True) - # with cell8: - # # Création du graphique en barres avec Plotly Express - # fig3 = px.bar( - # df_volume, - # x="TYPE_MILIEU", - # y="nb_collectes", - # text="Nombre de Collectes", - # title="Nombre de Collectes par Types de Milieux", - # color="#48BEF0", - # color_discrete_map=colors_map, - # orientation='h', - # ) - - # Amélioration du graphique - # fig3.update_traces(texttemplate="%{text:.2s}", textposition="inside") - # fig3.update_layout( - # autosize=True, - # uniformtext_minsize=8, - # uniformtext_mode="hide", - # xaxis_tickangle=90, - # showlegend=False, - # ) - - # Affichage du graphique - # st.plotly_chart(fig3, use_container_width=True) - - # with cell9: - # # Création du graphique en barres avec Plotly Express - # fig4 = px.bar( - # df_volume, - # x="TYPE_DECHET", - # y="nb_collectes", - # text="Nombre de Collectes", - # title="Nombre de Collectes par Types de Déchets", - # color="#48BEF0", - # color_discrete_map=colors_map, - # ) - - # # Amélioration du graphique - # fig4.update_traces(texttemplate="%{text:.2s}", textposition="inside") - # fig4.update_layout( - # autosize=True, - # uniformtext_minsize=8, - # uniformtext_mode="hide", - # xaxis_tickangle=90, - # showlegend=False, - # ) - - # # Affichage du graphique - # st.plotly_chart(fig4, use_container_width=True) - # Ligne 5 : 2 graphiques en ligne : line chart volume + nb collectes et Pie niveau de caractérisation l5_col1, l5_col2 
= st.columns(2) cell10 = l5_col1.container(border=True) cell11 = l5_col2.container(border=True) - # with cell10: - # Création du graphique en barres volume + ligne nb de relevées avec Plotly Express - - # with cell11: - # Création du graphique en donut avec Plotly Express - # onglet Evenements a venir with tab2: From c5d295aab00f446012dc63028edf93508b296ce1 Mon Sep 17 00:00:00 2001 From: linh dinh Date: Wed, 17 Apr 2024 19:50:05 +0200 Subject: [PATCH 030/147] =?UTF-8?q?Carte=20choropl=C3=A8the?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/hotspots_Linh.py | 413 ++++++++++++++++++++++++++ 1 file changed, 413 insertions(+) create mode 100644 dashboards/app/pages/hotspots_Linh.py diff --git a/dashboards/app/pages/hotspots_Linh.py b/dashboards/app/pages/hotspots_Linh.py new file mode 100644 index 0000000..7529dd2 --- /dev/null +++ b/dashboards/app/pages/hotspots_Linh.py @@ -0,0 +1,413 @@ +import streamlit as st + +# import altair as alt # Unused for now +import pandas as pd +import numpy as np +import geopandas as gpd + +# import duckdb # Unused for now +import requests +import plotly.express as px + +# To show folium maps on streamlit +import folium +from folium.plugins import MarkerCluster +from streamlit_folium import folium_static, st_folium + + +###################### +# Page configuration # +###################### +st.set_page_config( + page_title="Hotspots", + layout="wide", + initial_sidebar_state="expanded") + +###################################### +# 0/ Parameters for the hotspots tab # +###################################### + +# Data path for the df_nb_dechets +NB_DECHETS_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv" +) + +# Data path for the data_zds path +DATA_ZDS_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + 
"nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv" +) + +# Data path for the France regions geojson +REGION_GEOJSON_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/regions" + "-avec-outre-mer.geojson" +) + +# Data path for Correction +CORRECTION = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/" + "1-exploration-des-donn%C3%A9es/Exploration_visualisation/data/" + "releves_corrects_surf_lineaire.xlsx" +) + + +# Data path for Data Spot +DATA_SPOT = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/" + "raw/1-exploration-des-donn%C3%A9es/Exploration_visualisation" + "/data/export_structures_29022024.xlsx" +) + +# Params for the adopted spots map filters +ADOPTED_SPOTS_FILTERS_PARAMS = [ + { + "filter_col": "REGION", + "filter_message": "Sélectionnez une région:\n(par défaut votre région)", + }, + {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, +] + +########################################################################### +# 0 bis/ Fonctions utilitaires : peuvent être utilisées par tout le monde # +########################################################################### + + +def construct_query_string(bound_word=" and ", **params) -> str: + """Construct a query string in the right format for the pandas 'query' + function. The different params are bounded together in the query string with the + bound word given by default. 
If one of the params is 'None', it is not + included in the final query string.""" + + # Instanciate query string + query_string = "" + + # Iterate over the params to construct the query string + for param_key, param in params.items(): + # Construct the param sub string if the param is not 'None' + if param: + query_sub_string = f'{param_key} == "{param}"' + + # Add to the query string + query_string += f"{query_sub_string}{bound_word}" + + # Strip any remaining " and " at the end of the query string + return query_string.strip(bound_word) + + +def scalable_filters(data_zds: pd.DataFrame, filters_params=ADOPTED_SPOTS_FILTERS_PARAMS) -> dict: + """Create Streamlit select box filters in the sidebar as specified by the filters_params list. + Create and return the filter dict used to filter the hotspots maps accordingly.""" + + filter_dict = {} + + with st.sidebar: + for filter_params in filters_params: + column = filter_params['filter_col'] + message = filter_params['filter_message'] + + # Check if the message contains a newline character + if '\n' in message: + # Split the message at the newline character + main_message, sub_message = message.split('\n') + st.markdown(f"**{main_message}**") # Display the main part as bold text + st.caption(sub_message) # Display the secondary part as caption + else: + st.markdown(f"**{message}**") # If no newline, display the message as bold text + + # No newline in the selectbox label, so we pass only the main_message + selected_value = st.selectbox("", data_zds[column].unique()) + + filter_dict[column] = selected_value + + return filter_dict + + +############################## +# 1/ Import and prepare data # +############################## + +# Load all regions from the GeoJSON file +regions = gpd.read_file(REGION_GEOJSON_PATH) + +# nb dechets : Unused for now +df_nb_dechets = pd.read_csv(NB_DECHETS_PATH) + +# data_zds : main source of data for the hotspots tab +data_zds = pd.read_csv(DATA_ZDS_PATH) + +# correction : corrected data 
for density map +correction = pd.read_excel(CORRECTION) + +# spot: +spot = pd.read_excel(DATA_SPOT) + +# Fusion and correction +data_correct = pd.merge(data_zds, correction, on='ID_RELEVE', how='left') +data_correct = data_correct[data_correct['SURFACE_OK'] == 'OUI'] +data_zds = data_correct[data_correct['VOLUME_TOTAL'] > 0] + +# Calculate the total VOLUME_TOTAL for each region without removing duplicate data +volume_total_sums = data_zds.groupby('LIEU_REGION')['VOLUME_TOTAL'].sum().reset_index() + +# Merge the waste data and the geographical data +volume_total_sums = pd.merge(regions, volume_total_sums, left_on='nom', right_on='LIEU_REGION', how='left') + +# Remove rows containing NaN +volume_total_sums = volume_total_sums.dropna() + +# Remove duplicate data and calculate SURFACE total +data_unique = data_zds.drop_duplicates(subset=['LIEU_COORD_GPS']) +surface_total_sums = data_unique.groupby('LIEU_REGION')['SURFACE'].sum().reset_index() + +# Combine two datasets and calculate DENSITE +data_choropleth_sums = pd.merge(volume_total_sums, surface_total_sums, on='LIEU_REGION') +data_choropleth_sums['DENSITE'] = data_choropleth_sums['VOLUME_TOTAL'] / data_choropleth_sums['SURFACE'] + +################## +# 2/ Hotspot tab # +################## + +# Tab title +st.markdown("""# 🔥 Hotspots : **Quelles sont les zones les plus impactées ?**""") + +################################ +# 2.1/ Carte des spots adoptés # +################################ + +# Create the filter dict for the adopted spots map and the streamlit filter boxes +filter_dict = scalable_filters(data_zds) + +# Create the map of the adopted spots +def plot_adopted_waste_spots( + data_zds: pd.DataFrame, + filter_dict: dict, + region_geojson_path: str, +) -> folium.Map: + """Show a folium innteractive map of adopted spots within a selected region, + filtered by environments of deposit. 
+ Arguments: + - data_zds: The waste dataframe + - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by + """ + # 1/ Create the waste geodataframe # + + # Create a GeoDataFrame for waste points + gdf = gpd.GeoDataFrame( + data_zds, + geometry=gpd.points_from_xy( + data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"] + ), + crs="EPSG:4326", + ) + + # Construct the query string + query_string = construct_query_string(**filter_dict) + + # Filter the geodataframe by region and by environment + gdf_filtered = gdf.query(query_string) + + # 2/ Create the regions geodataframe # + + # Unpack the region name + region = filter_dict["REGION"] + + # Load France regions from a GeoJSON file + regions = gpd.read_file(region_geojson_path) + regions = regions.loc[regions["nom"] == region, :] + + # Filter the region geodataframe for the specified region + selected_region = regions[regions["nom"].str.lower() == region.lower()] + if selected_region.empty: + raise KeyError(f"Region '{region}' not found.") + + # 3/ Initialize folium map # + + # Initialize a folium map, centered around the mean location of the waste points + map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] + + # Catch ValueError if the filtered geodataframe contain no rows + try: + m = folium.Map( + location=map_center, zoom_start=5 + ) # Adjust zoom_start as needed for the best initial view + + # Return None if ValueError + except ValueError as e: + st.markdown( + "Il n'y a pas de hotspots pour les valeurs de filtres selectionnés !" 
+ ) + return + + # 4/ Add the markers # + + # Use MarkerCluster to manage markers if dealing with a large number of points + marker_cluster = MarkerCluster().add_to(m) + + # Add each waste point as a marker on the folium map + for _, row in gdf_filtered.iterrows(): + # Define the marker color: green for adopted spots, red for others + marker_color = "darkgreen" if row["SPOT_A1S"] else "red" + # Define the icon: check-circle for adopted, info-sign for others + icon_type = "check-circle" if row["SPOT_A1S"] else "info-sign" + + folium.Marker( + location=[row.geometry.y, row.geometry.x], + popup=f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", + icon=folium.Icon(color=marker_color, icon=icon_type, prefix="fa"), + ).add_to(marker_cluster) + + # 5/ Add the region boundary # + + # Add the region boundary to the map for context + folium.GeoJson( + selected_region, + name="Region Boundary", + style_function=lambda feature: { + "weight": 2, + "fillOpacity": 0.1, + }, + ).add_to(m) + + return m + + +######################################################## +# 2.1/ Carte densité de déchets sur les zones étudiées # +######################################################## + + + +######################################################## +# 2.2/ Carte choropleth de la densité de déchets # +######################################################## + +def plot_waste_density_choropleth( + data_zds: pd.DataFrame, + region_geojson_path: str, +) -> folium.Map: + + # Load all regions from the GeoJSON file + regions = gpd.read_file(region_geojson_path) + + # Calculate the total VOLUME_TOTAL for each region without removing duplicate data + volume_total_sums = data_zds.groupby('LIEU_REGION')['VOLUME_TOTAL'].sum().reset_index() + + # Merge the waste data and the geographical data + volume_total_sums = pd.merge(regions, volume_total_sums, left_on='nom', right_on='LIEU_REGION', how='left') + + # Remove rows containing NaN + volume_total_sums = volume_total_sums.dropna() + + # Remove duplicate data and calculate SURFACE total + data_unique = data_zds.drop_duplicates(subset=['LIEU_COORD_GPS']) + surface_total_sums = data_unique.groupby('LIEU_REGION')['SURFACE'].sum().reset_index() + + # Combine two datasets and calculate DENSITE + data_choropleth_sums = pd.merge(volume_total_sums, surface_total_sums, on='LIEU_REGION') + data_choropleth_sums['DENSITE'] = data_choropleth_sums['VOLUME_TOTAL'] / data_choropleth_sums['SURFACE'] + + # Set bins for the choropleth + min_bin = data_choropleth_sums['DENSITE'][data_choropleth_sums['DENSITE'] > 0].min() + min_bin = max(min_bin, 1e-10) + max_bin = 
data_choropleth_sums['DENSITE'].max() * 1.01 + num_bins = 6 + bins = np.logspace(np.log10(min_bin), np.log10(max_bin), num_bins) + + # Initialize the map centered on France + map_center = [46.2276, 2.2137] # Coordinates for France + m = folium.Map(location=map_center, zoom_start=6) + + # Create the choropleth map + folium.Choropleth( + geo_data=regions.to_json(), + name='Densité de Déchets', + data=data_choropleth_sums, + columns=['LIEU_REGION', 'DENSITE'], + key_on='feature.properties.nom', + fill_color='Reds', + bins=bins, + fill_opacity=0.7, + line_opacity=0.2, + legend_name='Densité de Déchets' + ).add_to(m) + + return m + + + +def make_density_choropleth(data_choropleth_sums, region_geojson_path): + # Load all regions from the GeoJSON file + regions_geojson = requests.get(region_geojson_path).json() + + # Set bins for the choropleth + min_density = data_choropleth_sums['DENSITE'].min() + max_density = data_choropleth_sums['DENSITE'].max() + + # Create the choropleth map using Plotly Express + choropleth = px.choropleth( + data_choropleth_sums, + geojson=regions_geojson, + featureidkey="properties.nom", + locations='LIEU_REGION', + color='DENSITE', + color_continuous_scale='Reds', + #color_continuous_midpoint=np.median(data_choropleth_sums['DENSITE']), + #range_color=(min_density, max_density), + labels={'DENSITE': 'Densité de Déchets'} + ) + + # Update layout to fit the map to the boundaries of the GeoJSON + choropleth.update_layout( + geo=dict( + fitbounds="locations", + visible=False + ), + margin=dict(l=0, r=0, t=0, b=0) + ) + + # Disable axis ticks and labels and set country borders to red + choropleth.update_geos( + resolution=50, + showcountries=True, countrycolor="red" + ) + + # Disable the display of other countries' borders + choropleth.update_geos( + showcountries=False, + showcoastlines=False, + showland=False, + showocean=False + ) + + return choropleth + + + +####################### +# Dashboard Main Panel# +####################### + +col = 
st.columns((1.5, 4.5, 2), gap='medium') + +# Construct the map +with col[1]: + st.markdown('### Spots Adoptés') + m = plot_adopted_waste_spots(data_zds, filter_dict, REGION_GEOJSON_PATH) + # Show the adopted spots map on the streamlit tab + if m: + folium_static(m) + + st.markdown('### Densité des déchets') + choropleth = make_density_choropleth(data_choropleth_sums, REGION_GEOJSON_PATH) + st.plotly_chart(choropleth, use_container_width=True) + + + st.markdown('### Densité des déchets') + m = plot_waste_density_choropleth(data_zds, REGION_GEOJSON_PATH) + if m: + folium_static(m) From b34796e1b8a8b4d2176f119229609b8cbba01f65 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Thu, 18 Apr 2024 16:52:56 +0200 Subject: [PATCH 031/147] =?UTF-8?q?partie=20des=20modifications=20demand?= =?UTF-8?q?=C3=A9es=20pour=20cet=20onglet=20(voir=20issue)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 119 ++++++++++++++--------------------- 1 file changed, 46 insertions(+), 73 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index ef70db2..d4a1a8f 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -105,7 +105,7 @@ def load_df_dict_corr_dechet_materiau(): poids_total = df_volume["POIDS_TOTAL"].sum() volume_total_categorise = df_volume[cols_volume].sum().sum() pct_volume_categorise = volume_total_categorise / volume_total - nb_collectes = len(df_volume) + nb_collectes_int = len(df_volume) # estimation du poids categorisée en utilisant pct_volume_categorise poids_total_categorise = round(poids_total * pct_volume_categorise) @@ -162,9 +162,23 @@ def load_df_dict_corr_dechet_materiau(): # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") + nb_collectes = f"{nb_collectes_int:,.0f}".replace(",", " ") cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + # 
Message d'avertissement nb de collectes en dessous de 5 + if nb_collectes_int == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(nb_collectes_int) + + " collecte considérées dans les données présentées." + ) + elif nb_collectes_int <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(nb_collectes_int) + + " collectes considérées dans les données présentées." + ) + # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux l2_col1, l2_col2 = st.columns(2) @@ -241,8 +255,8 @@ def load_df_dict_corr_dechet_materiau(): color_discrete_map=colors_map, ) fig3.update_layout(bargap=0.2, height=500) - fig3.update_layout(yaxis_title="% du volume collecté", xaxis_title=None) + fig3.update_xaxes(tickangle=-45) # Afficher le graphique with st.container(border=True): @@ -398,6 +412,20 @@ def load_df_dict_corr_dechet_materiau(): nombre_collectes_filtered = f"{len(df_filtered):,.0f}".replace(",", " ") cell8.metric("Nombre de collectes", f"{nombre_collectes_filtered}") + # Message d'avertissement nb de collectes en dessous de 5 + if len(df_filtered) == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(len(df_filtered)) + + " collecte considérées dans les données présentées." + ) + elif len(df_filtered) <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(len(df_filtered)) + + " collectes considérées dans les données présentées." 
+ ) + # Étape 3: Preparation dataframe pour graphe # Copie des données pour transfo df_volume2 = df_filtered.copy() @@ -430,41 +458,26 @@ def load_df_dict_corr_dechet_materiau(): df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) # Étape 4: Création du Graphique - if not df_filtered.empty: - fig4 = px.pie( - df_totals_sorted2, - values="Volume", - names="Matériau", - title="Répartition des matériaux en volume", - hole=0.4, - color="Matériau", - color_discrete_map=colors_map, - ) - - # Amélioration de l'affichage - fig4.update_traces(textinfo="percent") - fig4.update_layout(autosize=True, legend_title_text="Matériau") - with st.container(border=True): - st.plotly_chart(fig4, use_container_width=True) - else: - st.write("Aucune donnée à afficher pour les filtres sélectionnés.") - # 2ème option de graphique, à choisir if not df_filtered.empty: - fig5 = px.treemap( + fig4 = px.treemap( df_totals_sorted2, path=["Matériau"], values="Volume", - title="2ème option : treemap de répartition des matériaux en volume", + title="Répartition des matériaux en volume", color="Matériau", color_discrete_map=colors_map, ) - fig5.update_layout( + fig4.update_layout( margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 ) - fig5.update_traces(textinfo="label+value") + fig4.update_traces( + textinfo="label+value", + textfont=dict(size=16, family="Arial", color="black"), + ) with st.container(border=True): - st.plotly_chart(fig5, use_container_width=True) + st.plotly_chart(fig4, use_container_width=True) + else: st.write("Aucune donnée à afficher pour les filtres sélectionnés.") @@ -500,15 +513,6 @@ def load_df_dict_corr_dechet_materiau(): f"{volume_total_categorise} litres", ) - # # 2ème métrique : poids - # cell2 = l1_col2.container(border=True) - # poids_total_categorise = f"{poids_total_categorise:,.0f}".replace(",", " ") - # # poids_total = f"{poids_total:,.0f}".replace(",", " ") - # cell2.metric( - # "Poids estimé de déchets categorisés", - # 
f"{poids_total_categorise} kg", - # ) - # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") @@ -536,48 +540,27 @@ def load_df_dict_corr_dechet_materiau(): # Preparation de la figure barplot df_top10_dechets.reset_index(inplace=True) # Création du graphique en barres avec Plotly Express - fig = px.bar( - df_top10_dechets, - x="categorie", - y="nb_dechet", - labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, - title="Top 10 dechets ramassés", - color="Materiau", - color_discrete_map=colors_map, - category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, - text_auto=True, - ) - fig.update_layout(yaxis_type="log") - # Amélioration du visuel du graphique - fig.update_layout( - width=1400, - height=900, - uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=90, - ) - - fig_alt = px.bar( + fig5 = px.bar( df_top10_dechets, y="categorie", x="nb_dechet", labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, - title="Top 10 dechets ramassés (alternative)", + title="Top 10 dechets ramassés ", text="nb_dechet", color="Materiau", color_discrete_map=colors_map, category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, ) - fig_alt.update_layout(xaxis_type="log") + fig5.update_layout(xaxis_type="log") # Amélioration du visuel du graphique - fig_alt.update_traces( + fig5.update_traces( # texttemplate="%{text:.2f}", textposition="inside", textfont_color="white", textfont_size=20, ) - fig_alt.update_layout( + fig5.update_layout( width=1400, height=900, uniformtext_minsize=8, @@ -589,17 +572,7 @@ def load_df_dict_corr_dechet_materiau(): del df_top10_dechets["Materiau"] with st.container(border=True): - col1, col2 = st.columns([3, 1]) - - with col1: - st.plotly_chart(fig, use_container_width=True) - st.plotly_chart(fig_alt, use_container_width=True) - - with col2: - st.write("Nombre ramassé pour chaque déchet") - for index, row in 
df_top10_dechets.iterrows(): - value = f"{row['nb_dechet']:,.0f}".replace(",", " ") - st.metric(label=row["categorie"], value=value) + st.plotly_chart(fig5, use_container_width=True) st.write("") st.caption( From 1219923a8bbc0b46918d5a9c3f874b37980b552e Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Thu, 18 Apr 2024 17:17:20 +0200 Subject: [PATCH 032/147] Ajout carte --- dashboards/app/pages/actions.py | 39 +++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index acdb778..6f6d3bf 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -166,6 +166,45 @@ def load_df_other(): cell3.metric("Nombre de structures", f"{nombre_structures}") # Ligne 2 : Carte + with st.container(): + # Création du DataFrame de travail pour la carte + df_map_evnenements = df_other_filtre.copy() + # Création de la carte centrée autour d'une localisation + # Calcul des limites à partir de vos données + min_lat = df_map_evnenements["LIEU_COORD_GPS_Y"].min() + max_lat = df_map_evnenements["LIEU_COORD_GPS_Y"].max() + min_lon = df_map_evnenements["LIEU_COORD_GPS_X"].min() + max_lon = df_map_evnenements["LIEU_COORD_GPS_X"].max() + + map_evenements = folium.Map( + location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], + zoom_start=8, + tiles="OpenStreetMap", + ) + # Facteur de normalisation pour ajuster la taille des bulles + normalisation_facteur = 100 + for index, row in df_map_evnenements.iterrows(): + # Application de la normalisation + radius = row["NB_PARTICIPANTS"] / normalisation_facteur + + # Application d'une limite minimale pour le rayon si nécessaire + radius = max(radius, 5) + + folium.CircleMarker( + location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), + radius=radius, # Utilisation du rayon ajusté + popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['NOM_EVENEMENT']}, {row['DATE']} : nombre de participants : {row['NB_PARTICIPANTS']}", + 
color="#3186cc", + fill=True, + fill_color="#3186cc", + ).add_to(map_evenements) + + # Affichage de la carte Folium dans Streamlit + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_evenements).render(), # , width=1400 + height=750, + ) # Ligne 3 : 1 graphique donut chart et un graphique barplot horizontal nombre de relevés par types de milieux # préparation du dataframe et figure niveaux de caracterisation From 0cdabe9985bd38a8447254ebd1d40797114574ac Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Thu, 18 Apr 2024 17:26:30 +0200 Subject: [PATCH 033/147] Modification taille minimale des points sur la carte et deplacement de la legende du barplot --- dashboards/app/pages/data.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index d4a1a8f..4e88f50 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -566,6 +566,7 @@ def load_df_dict_corr_dechet_materiau(): uniformtext_minsize=8, uniformtext_mode="hide", xaxis_tickangle=90, + legend=dict(x=1, y=0, xanchor="right", yanchor="bottom"), ) # Suppression de la colonne categorie @@ -601,7 +602,7 @@ def load_df_dict_corr_dechet_materiau(): min_lon = df_map_data["LIEU_COORD_GPS_X"].min() max_lon = df_map_data["LIEU_COORD_GPS_X"].max() - map_paca = folium.Map( + map_data = folium.Map( location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], zoom_start=8, tiles="OpenStreetMap", @@ -615,7 +616,7 @@ def load_df_dict_corr_dechet_materiau(): radius = row["nb_dechet"] / normalisation_facteur # Application d'une limite minimale pour le rayon si nécessaire - radius = max(radius, 1) + radius = max(radius, 5) folium.CircleMarker( location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), @@ -624,12 +625,12 @@ def load_df_dict_corr_dechet_materiau(): color="#3186cc", fill=True, fill_color="#3186cc", - ).add_to(map_paca) + ).add_to(map_data) # Affichage de la carte Folium dans Streamlit 
st_folium = st.components.v1.html st_folium( - folium.Figure().add_child(map_paca).render(), # , width=1400 + folium.Figure().add_child(map_data).render(), # , width=1400 height=750, ) From 4dca7a7ea82202de9553a37c3c5a1ff49a019547 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Thu, 18 Apr 2024 17:31:54 +0200 Subject: [PATCH 034/147] Ajout d'un stop pour le bug en cas de non selection de territoire --- dashboards/app/pages/data.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 4e88f50..f1be7c2 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -53,6 +53,7 @@ def load_df_dict_corr_dechet_materiau(): dans l'onglet Home pour afficher les données. :warning: """ ) + st.stop() else: df_other = st.session_state["df_other_filtre"].copy() df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() From 1ddc10bec3dbaf5ab085f92149f3bf95cc6771d9 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Thu, 18 Apr 2024 20:41:01 +0200 Subject: [PATCH 035/147] changement nom Dockerfile --- dashboards/{dockerfile => Dockerfile} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename dashboards/{dockerfile => Dockerfile} (100%) diff --git a/dashboards/dockerfile b/dashboards/Dockerfile similarity index 100% rename from dashboards/dockerfile rename to dashboards/Dockerfile From f513668e5c7c70f5395fb3729bf6ee482c348add Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 08:50:28 +0200 Subject: [PATCH 036/147] =?UTF-8?q?suite=20modifications=20demand=C3=A9es?= =?UTF-8?q?=20par=20merterre=20(voir=20issue)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 70 ++++++++++++++++++++++++------------ 1 file changed, 48 insertions(+), 22 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index f1be7c2..6b3de88 100644 --- a/dashboards/app/pages/data.py +++ 
b/dashboards/app/pages/data.py @@ -24,9 +24,7 @@ ) if filtre_niveau == "" and filtre_collectivite == "": - st.write( - "Aucune sélection de territoire n'ayant été effectuée les données sont globales" - ) + st.write("Aucune sélection de territoire n'a été effectuée") else: st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") @@ -164,7 +162,7 @@ def load_df_dict_corr_dechet_materiau(): # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) nb_collectes = f"{nb_collectes_int:,.0f}".replace(",", " ") - cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") + cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int == 1: @@ -516,9 +514,22 @@ def load_df_dict_corr_dechet_materiau(): # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") + # Message d'avertissement nb de collectes en dessous de 5 + if nb_collectes_int == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(nb_collectes) + + " collecte considérées dans les données présentées." + ) + elif nb_collectes_int <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(nb_collectes) + + " collectes considérées dans les données présentées." 
+ ) + # Ligne 2 : graphique top déchets # Filtration des données pour nb_dechets @@ -767,10 +778,11 @@ def load_df_dict_corr_dechet_materiau(): nb_dechet_marque = marque_df["nb_dechet"].sum() nb_marques = len(top_marque_df["Marque"].unique()) + collectes = len(df_filtered) # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2 = st.columns(2) + l1_col1, l1_col2, l1_col3 = st.columns(3) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) # 1ère métrique : volume total de déchets collectés cell1 = l1_col1.container(border=True) @@ -789,6 +801,30 @@ def load_df_dict_corr_dechet_materiau(): "Nombre de secteurs identifiés lors des collectes", f"{nb_secteurs} secteurs", ) + + # 3ème métrique : nombre de collectes + cell3 = l1_col3.container(border=True) + collectes_formatted = f"{collectes:,.0f}".replace(",", " ") + cell3.metric( + "Nombre de collectes comptabilisées", + f"{collectes_formatted} collectes", + ) + + # Message d'avertissement nb de collectes en dessous de 5 + if collectes == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(collectes) + + " collecte considérées dans les données présentées." + ) + elif collectes <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(collectes) + + " collectes considérées dans les données présentées." 
+ ) + + # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page colors_map_secteur = { "AGRICULTURE": "#156644", "ALIMENTATION": "#F7D156", @@ -856,32 +892,22 @@ def load_df_dict_corr_dechet_materiau(): "Nombre de marques identifiés lors des collectes", f"{nb_marques} marques", ) - colors_map_marque = { - "HEINEKEN": "#F7D156", - "COCA-COLA": "#F7D156", - "MARLBORO": "#E9003F", - "CRISTALINE": "#F7D156", - "PHILIP MORRIS": "#E9003F", - "CAPRI-SUN": "#F7D156", - "OASIS": "#F7D156", - "1664": "#F7D156", - "WINSTON": "#E9003F", - "RED BULL": "#F7D156", - } fig_marque = px.bar( - top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=False), + top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), x="Nombre de déchets", y="Marque", title="Top 10 des marques les plus ramassées", - color="Marque", + color_discrete_sequence=["#1951A0"], orientation="h", - color_discrete_map=colors_map_marque, text_auto=False, + text=top_marque_df.tail(10)["Marque"] + + ": " + + top_marque_df.tail(10)["Nombre de déchets"].astype(str), ) # add log scale to x axis fig_marque.update_layout(xaxis_type="log") - fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") + # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_marque.update_layout( width=800, From 44c6af9c73e33b2f569da7fe8c8914209f2a020b Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 09:07:10 +0200 Subject: [PATCH 037/147] ajout message d'erreur si pas de selection de territoire --- dashboards/app/pages/actions.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index 6f6d3bf..ea669a4 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -52,14 +52,18 @@ def load_df_other(): # Appel des fonctions pour charger les données -df_nb_dechet = load_df_nb_dechet() -# df_dict_corr_dechet_materiau = 
load_df_dict_corr_dechet_materiau() -# Appeler le dataframe filtré depuis le session state -if "df_other_filtre" in st.session_state: - df_other = st.session_state["df_other_filtre"].copy() +# Appeler les dataframes volumes et nb_dechets filtré depuis le session state +if "df_other_filtre" not in st.session_state: + st.write( + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home pour afficher les données. :warning: + """ + ) + st.stop() else: - df_other = load_df_other() + df_other = st.session_state["df_other_filtre"].copy() # Titre de l'onglet st.markdown( @@ -239,6 +243,7 @@ def load_df_other(): text="counts", orientation="h", ) + fig2_actions.update_layout(xaxis_title="", yaxis_title="") l3_col1, l3_col2 = st.columns(2) cell4 = l3_col1.container(border=True) From d6dd406ef3ca718c346d7c265de6c811ef648255 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 09:11:31 +0200 Subject: [PATCH 038/147] harmonisation requirements.txt onglets --- dashboards/app/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 9b2e557..d2adc42 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -4,3 +4,5 @@ folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 streamlit-folium +folium==0.15.1 +plotly==5.19.0 From a1eedcba197dad18889ffb4e422245876e09ce22 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 09:12:34 +0200 Subject: [PATCH 039/147] harmonisation requirements.txt onglets --- dashboards/app/requirements.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 9e9156e..d2adc42 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -1,5 +1,8 @@ pandas==2.0.3 +geopandas==0.14.3 +folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 +streamlit-folium folium==0.15.1 -plotly==5.19.0 \ No newline at 
end of file +plotly==5.19.0 From c3d3d021375aa129591ed18a4d6fe445bff9171f Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 09:13:23 +0200 Subject: [PATCH 040/147] harmonisation requirements.txt onglets --- dashboards/app/requirements.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 9e9156e..d2adc42 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -1,5 +1,8 @@ pandas==2.0.3 +geopandas==0.14.3 +folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 +streamlit-folium folium==0.15.1 -plotly==5.19.0 \ No newline at end of file +plotly==5.19.0 From 1b81f858a66bf03837b888443b2f6d233a3d13dc Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 09:22:18 +0200 Subject: [PATCH 041/147] rectification harmonisation requirements.txt onglets --- dashboards/app/requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index d2adc42..e75f8b9 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -3,6 +3,5 @@ geopandas==0.14.3 folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 -streamlit-folium -folium==0.15.1 +streamlit-folium==0.19.1 plotly==5.19.0 From 0fa9023d2234822b8702bb00ef9ee7933a4770a1 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 09:23:32 +0200 Subject: [PATCH 042/147] rectification harmonisation requirements.txt onglets --- dashboards/app/requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index d2adc42..e75f8b9 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -3,6 +3,5 @@ geopandas==0.14.3 folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 -streamlit-folium -folium==0.15.1 +streamlit-folium==0.19.1 plotly==5.19.0 From 
4b6c95b6c0c90fecabd61744ef2dd31e71ac3a3e Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 09:24:09 +0200 Subject: [PATCH 043/147] rectification harmonisation requirements.txt onglets --- dashboards/app/requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index d2adc42..e75f8b9 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -3,6 +3,5 @@ geopandas==0.14.3 folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 -streamlit-folium -folium==0.15.1 +streamlit-folium==0.19.1 plotly==5.19.0 From 599043e29c263ebab89718a1142d12fc12cea915 Mon Sep 17 00:00:00 2001 From: linh-dinh-1012 <147145870+linh-dinh-1012@users.noreply.github.com> Date: Fri, 19 Apr 2024 10:08:03 +0200 Subject: [PATCH 044/147] Delete dashboards/app/pages/hotspots_Linh.py --- dashboards/app/pages/hotspots_Linh.py | 413 -------------------------- 1 file changed, 413 deletions(-) delete mode 100644 dashboards/app/pages/hotspots_Linh.py diff --git a/dashboards/app/pages/hotspots_Linh.py b/dashboards/app/pages/hotspots_Linh.py deleted file mode 100644 index 7529dd2..0000000 --- a/dashboards/app/pages/hotspots_Linh.py +++ /dev/null @@ -1,413 +0,0 @@ -import streamlit as st - -# import altair as alt # Unused for now -import pandas as pd -import numpy as np -import geopandas as gpd - -# import duckdb # Unused for now -import requests -import plotly.express as px - -# To show folium maps on streamlit -import folium -from folium.plugins import MarkerCluster -from streamlit_folium import folium_static, st_folium - - -###################### -# Page configuration # -###################### -st.set_page_config( - page_title="Hotspots", - layout="wide", - initial_sidebar_state="expanded") - -###################################### -# 0/ Parameters for the hotspots tab # -###################################### - -# Data path for the df_nb_dechets -NB_DECHETS_PATH = ( - 
"https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" -) - -# Data path for the data_zds path -DATA_ZDS_PATH = ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" -) - -# Data path for the France regions geojson -REGION_GEOJSON_PATH = ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/regions" - "-avec-outre-mer.geojson" -) - -# Data path for Correction -CORRECTION = ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/" - "1-exploration-des-donn%C3%A9es/Exploration_visualisation/data/" - "releves_corrects_surf_lineaire.xlsx" -) - - -# Data path for Data Spot -DATA_SPOT = ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/" - "raw/1-exploration-des-donn%C3%A9es/Exploration_visualisation" - "/data/export_structures_29022024.xlsx" -) - -# Params for the adopted spots map filters -ADOPTED_SPOTS_FILTERS_PARAMS = [ - { - "filter_col": "REGION", - "filter_message": "Sélectionnez une région:\n(par défaut votre région)", - }, - {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, -] - -########################################################################### -# 0 bis/ Fonctions utilitaires : peuvent être utilisées par tout le monde # -########################################################################### - - -def construct_query_string(bound_word=" and ", **params) -> str: - """Construct a query string in the right format for the pandas 'query' - function. The different params are bounded together in the query string with the - bound word given by default. 
If one of the params is 'None', it is not - included in the final query string.""" - - # Instanciate query string - query_string = "" - - # Iterate over the params to construct the query string - for param_key, param in params.items(): - # Construct the param sub string if the param is not 'None' - if param: - query_sub_string = f'{param_key} == "{param}"' - - # Add to the query string - query_string += f"{query_sub_string}{bound_word}" - - # Strip any remaining " and " at the end of the query string - return query_string.strip(bound_word) - - -def scalable_filters(data_zds: pd.DataFrame, filters_params=ADOPTED_SPOTS_FILTERS_PARAMS) -> dict: - """Create Streamlit select box filters in the sidebar as specified by the filters_params list. - Create and return the filter dict used to filter the hotspots maps accordingly.""" - - filter_dict = {} - - with st.sidebar: - for filter_params in filters_params: - column = filter_params['filter_col'] - message = filter_params['filter_message'] - - # Check if the message contains a newline character - if '\n' in message: - # Split the message at the newline character - main_message, sub_message = message.split('\n') - st.markdown(f"**{main_message}**") # Display the main part as bold text - st.caption(sub_message) # Display the secondary part as caption - else: - st.markdown(f"**{message}**") # If no newline, display the message as bold text - - # No newline in the selectbox label, so we pass only the main_message - selected_value = st.selectbox("", data_zds[column].unique()) - - filter_dict[column] = selected_value - - return filter_dict - - -############################## -# 1/ Import and prepare data # -############################## - -# Load all regions from the GeoJSON file -regions = gpd.read_file(REGION_GEOJSON_PATH) - -# nb dechets : Unused for now -df_nb_dechets = pd.read_csv(NB_DECHETS_PATH) - -# data_zds : main source of data for the hotspots tab -data_zds = pd.read_csv(DATA_ZDS_PATH) - -# correction : corrected data 
for density map -correction = pd.read_excel(CORRECTION) - -# spot: -spot = pd.read_excel(DATA_SPOT) - -# Fusion and correction -data_correct = pd.merge(data_zds, correction, on='ID_RELEVE', how='left') -data_correct = data_correct[data_correct['SURFACE_OK'] == 'OUI'] -data_zds = data_correct[data_correct['VOLUME_TOTAL'] > 0] - -# Calculate the total VOLUME_TOTAL for each region without removing duplicate data -volume_total_sums = data_zds.groupby('LIEU_REGION')['VOLUME_TOTAL'].sum().reset_index() - -# Merge the waste data and the geographical data -volume_total_sums = pd.merge(regions, volume_total_sums, left_on='nom', right_on='LIEU_REGION', how='left') - -# Remove rows containing NaN -volume_total_sums = volume_total_sums.dropna() - -# Remove duplicate data and calculate SURFACE total -data_unique = data_zds.drop_duplicates(subset=['LIEU_COORD_GPS']) -surface_total_sums = data_unique.groupby('LIEU_REGION')['SURFACE'].sum().reset_index() - -# Combine two datasets and calculate DENSITE -data_choropleth_sums = pd.merge(volume_total_sums, surface_total_sums, on='LIEU_REGION') -data_choropleth_sums['DENSITE'] = data_choropleth_sums['VOLUME_TOTAL'] / data_choropleth_sums['SURFACE'] - -################## -# 2/ Hotspot tab # -################## - -# Tab title -st.markdown("""# 🔥 Hotspots : **Quelles sont les zones les plus impactées ?**""") - -################################ -# 2.1/ Carte des spots adoptés # -################################ - -# Create the filter dict for the adopted spots map and the streamlit filter boxes -filter_dict = scalable_filters(data_zds) - -# Create the map of the adopted spots -def plot_adopted_waste_spots( - data_zds: pd.DataFrame, - filter_dict: dict, - region_geojson_path: str, -) -> folium.Map: - """Show a folium innteractive map of adopted spots within a selected region, - filtered by environments of deposit. 
- Arguments: - - data_zds: The waste dataframe - - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by - """ - # 1/ Create the waste geodataframe # - - # Create a GeoDataFrame for waste points - gdf = gpd.GeoDataFrame( - data_zds, - geometry=gpd.points_from_xy( - data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"] - ), - crs="EPSG:4326", - ) - - # Construct the query string - query_string = construct_query_string(**filter_dict) - - # Filter the geodataframe by region and by environment - gdf_filtered = gdf.query(query_string) - - # 2/ Create the regions geodataframe # - - # Unpack the region name - region = filter_dict["REGION"] - - # Load France regions from a GeoJSON file - regions = gpd.read_file(region_geojson_path) - regions = regions.loc[regions["nom"] == region, :] - - # Filter the region geodataframe for the specified region - selected_region = regions[regions["nom"].str.lower() == region.lower()] - if selected_region.empty: - raise KeyError(f"Region '{region}' not found.") - - # 3/ Initialize folium map # - - # Initialize a folium map, centered around the mean location of the waste points - map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] - - # Catch ValueError if the filtered geodataframe contain no rows - try: - m = folium.Map( - location=map_center, zoom_start=5 - ) # Adjust zoom_start as needed for the best initial view - - # Return None if ValueError - except ValueError as e: - st.markdown( - "Il n'y a pas de hotspots pour les valeurs de filtres selectionnés !" 
- ) - return - - # 4/ Add the markers # - - # Use MarkerCluster to manage markers if dealing with a large number of points - marker_cluster = MarkerCluster().add_to(m) - - # Add each waste point as a marker on the folium map - for _, row in gdf_filtered.iterrows(): - # Define the marker color: green for adopted spots, red for others - marker_color = "darkgreen" if row["SPOT_A1S"] else "red" - # Define the icon: check-circle for adopted, info-sign for others - icon_type = "check-circle" if row["SPOT_A1S"] else "info-sign" - - folium.Marker( - location=[row.geometry.y, row.geometry.x], - popup=f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", - icon=folium.Icon(color=marker_color, icon=icon_type, prefix="fa"), - ).add_to(marker_cluster) - - # 5/ Add the region boundary # - - # Add the region boundary to the map for context - folium.GeoJson( - selected_region, - name="Region Boundary", - style_function=lambda feature: { - "weight": 2, - "fillOpacity": 0.1, - }, - ).add_to(m) - - return m - - -######################################################## -# 2.1/ Carte densité de déchets sur les zones étudiées # -######################################################## - - - -######################################################## -# 2.2/ Carte choropleth de la densité de déchets # -######################################################## - -def plot_waste_density_choropleth( - data_zds: pd.DataFrame, - region_geojson_path: str, -) -> folium.Map: - - # Load all regions from the GeoJSON file - regions = gpd.read_file(region_geojson_path) - - # Calculate the total VOLUME_TOTAL for each region without removing duplicate data - volume_total_sums = data_zds.groupby('LIEU_REGION')['VOLUME_TOTAL'].sum().reset_index() - - # Merge the waste data and the geographical data - volume_total_sums = pd.merge(regions, volume_total_sums, left_on='nom', right_on='LIEU_REGION', how='left') - - # Remove rows containing NaN - volume_total_sums = volume_total_sums.dropna() - - # Remove duplicate data and calculate SURFACE total - data_unique = data_zds.drop_duplicates(subset=['LIEU_COORD_GPS']) - surface_total_sums = data_unique.groupby('LIEU_REGION')['SURFACE'].sum().reset_index() - - # Combine two datasets and calculate DENSITE - data_choropleth_sums = pd.merge(volume_total_sums, surface_total_sums, on='LIEU_REGION') - data_choropleth_sums['DENSITE'] = data_choropleth_sums['VOLUME_TOTAL'] / data_choropleth_sums['SURFACE'] - - # Set bins for the choropleth - min_bin = data_choropleth_sums['DENSITE'][data_choropleth_sums['DENSITE'] > 0].min() - min_bin = max(min_bin, 1e-10) - max_bin = 
data_choropleth_sums['DENSITE'].max() * 1.01 - num_bins = 6 - bins = np.logspace(np.log10(min_bin), np.log10(max_bin), num_bins) - - # Initialize the map centered on France - map_center = [46.2276, 2.2137] # Coordinates for France - m = folium.Map(location=map_center, zoom_start=6) - - # Create the choropleth map - folium.Choropleth( - geo_data=regions.to_json(), - name='Densité de Déchets', - data=data_choropleth_sums, - columns=['LIEU_REGION', 'DENSITE'], - key_on='feature.properties.nom', - fill_color='Reds', - bins=bins, - fill_opacity=0.7, - line_opacity=0.2, - legend_name='Densité de Déchets' - ).add_to(m) - - return m - - - -def make_density_choropleth(data_choropleth_sums, region_geojson_path): - # Load all regions from the GeoJSON file - regions_geojson = requests.get(region_geojson_path).json() - - # Set bins for the choropleth - min_density = data_choropleth_sums['DENSITE'].min() - max_density = data_choropleth_sums['DENSITE'].max() - - # Create the choropleth map using Plotly Express - choropleth = px.choropleth( - data_choropleth_sums, - geojson=regions_geojson, - featureidkey="properties.nom", - locations='LIEU_REGION', - color='DENSITE', - color_continuous_scale='Reds', - #color_continuous_midpoint=np.median(data_choropleth_sums['DENSITE']), - #range_color=(min_density, max_density), - labels={'DENSITE': 'Densité de Déchets'} - ) - - # Update layout to fit the map to the boundaries of the GeoJSON - choropleth.update_layout( - geo=dict( - fitbounds="locations", - visible=False - ), - margin=dict(l=0, r=0, t=0, b=0) - ) - - # Disable axis ticks and labels and set country borders to red - choropleth.update_geos( - resolution=50, - showcountries=True, countrycolor="red" - ) - - # Disable the display of other countries' borders - choropleth.update_geos( - showcountries=False, - showcoastlines=False, - showland=False, - showocean=False - ) - - return choropleth - - - -####################### -# Dashboard Main Panel# -####################### - -col = 
st.columns((1.5, 4.5, 2), gap='medium') - -# Construct the map -with col[1]: - st.markdown('### Spots Adoptés') - m = plot_adopted_waste_spots(data_zds, filter_dict, REGION_GEOJSON_PATH) - # Show the adopted spots map on the streamlit tab - if m: - folium_static(m) - - st.markdown('### Densité des déchets') - choropleth = make_density_choropleth(data_choropleth_sums, REGION_GEOJSON_PATH) - st.plotly_chart(choropleth, use_container_width=True) - - - st.markdown('### Densité des déchets') - m = plot_waste_density_choropleth(data_zds, REGION_GEOJSON_PATH) - if m: - folium_static(m) From 55134fea6efc9f8284d27b5c9bc2a51b76e0ae8d Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Fri, 19 Apr 2024 11:32:49 +0200 Subject: [PATCH 045/147] rectification harmonisation requirements.txt onglets --- dashboards/app/requirements.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 28dbd01..e75f8b9 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -1,3 +1,7 @@ pandas==2.0.3 +geopandas==0.14.3 +folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 +streamlit-folium==0.19.1 +plotly==5.19.0 From b384b556064c5ff445eb4277fced9defd88e3c27 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Fri, 19 Apr 2024 12:04:48 +0200 Subject: [PATCH 046/147] tg - test styles css et ajout theme config.toml --- dashboards/app/.streamlit/config.toml | 2 ++ dashboards/app/home.py | 14 ++++++++++++++ dashboards/app/style.css | 13 +++++++++++++ 3 files changed, 29 insertions(+) create mode 100644 dashboards/app/.streamlit/config.toml create mode 100644 dashboards/app/style.css diff --git a/dashboards/app/.streamlit/config.toml b/dashboards/app/.streamlit/config.toml new file mode 100644 index 0000000..b37a144 --- /dev/null +++ b/dashboards/app/.streamlit/config.toml @@ -0,0 +1,2 @@ +[theme] +base = "light" \ No newline at end of file diff --git 
a/dashboards/app/home.py b/dashboards/app/home.py index 72176f6..77d4bbd 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,6 +1,20 @@ +from pathlib import Path + import pandas as pd import streamlit as st + +# load and apply CSS styles +def load_css(file_name: str) -> None: + with Path.open(file_name) as f: + st.markdown(f"", unsafe_allow_html=True) + + +# Load and apply the CSS file at the start of your app +# local debug +load_css("style.css") + + st.markdown( """ # Bienvenue 👋 diff --git a/dashboards/app/style.css b/dashboards/app/style.css new file mode 100644 index 0000000..3fb0486 --- /dev/null +++ b/dashboards/app/style.css @@ -0,0 +1,13 @@ +@import url('https://fonts.googleapis.com/css2?family=Montserrat:wght@500;700&display=swap'); + +/* GLOBAL FONT CHANGE */ +html, body, [class*="css"] { + font-family: 'Montserrat', sans-serif; +} + + +/* Sidebar color change */ +[data-testid="stSidebar"] { + background-color: #003463 !important; + color: #FFFFFF !important; +} From 253cbe4cbccf4ea8597a2f0ed9592da1983d68f6 Mon Sep 17 00:00:00 2001 From: linh dinh Date: Fri, 19 Apr 2024 17:00:42 +0200 Subject: [PATCH 047/147] =?UTF-8?q?Ajoute=20carte=20de=20densit=C3=A9=20Fr?= =?UTF-8?q?ance=20+=20Tableaux=20lieu=20et=20milieu?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/hotspots.py | 291 ++++++++++++++++++++++++++++--- 1 file changed, 269 insertions(+), 22 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index a64d50b..fbe23f5 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -2,15 +2,34 @@ # import altair as alt # Unused for now import pandas as pd +import numpy as np import geopandas as gpd # import duckdb # Unused for now -import folium -from folium.plugins import MarkerCluster + +# import for choropleth map +import requests +import plotly.express as px + # To show folium maps on streamlit 
+import folium +from folium.plugins import MarkerCluster from streamlit_folium import folium_static, st_folium +###################### +# Page configuration # +###################### +st.set_page_config( + page_title="Hotspots", + layout="wide", + initial_sidebar_state="expanded", + menu_items={ + 'Get Help': 'https://www.extremelycoolapp.com/help', + 'Report a bug': "https://www.extremelycoolapp.com/bug", + 'About': "# This is a header. This is an *extremely* cool app!" + } + ) ###################################### # 0/ Parameters for the hotspots tab # @@ -37,13 +56,32 @@ "-avec-outre-mer.geojson" ) +# Data path for Correction +CORRECTION = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/" + "1-exploration-des-donn%C3%A9es/Exploration_visualisation/data/" + "releves_corrects_surf_lineaire.xlsx" +) + +# Data path for Data Spot +DATA_SPOT = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/" + "raw/1-exploration-des-donn%C3%A9es/Exploration_visualisation" + "/data/export_structures_29022024.xlsx" +) + # Params for the adopted spots map filters ADOPTED_SPOTS_FILTERS_PARAMS = [ { "filter_col": "REGION", - "filter_message": "Seléctionnez une région (par défaut votre région) :", - }, - {"filter_col": "TYPE_MILIEU", "filter_message": "Seléctionnez un milieu :"}, + "filter_message": "Sélectionnez une région (par défaut votre région) :" + }, + { + "filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :" + }, + { + "filter_col":"ANNEE", "filter_message": "Sélectionnez une année :" + } ] ########################################################################### @@ -63,8 +101,14 @@ def construct_query_string(bound_word=" and ", **params) -> str: # Iterate over the params to construct the query string for param_key, param in params.items(): # Construct the param sub string if the param is not 'None' - if param: - query_sub_string = f'{param_key} == "{param}"' + if param is not None: + # Check if the parameter value is of type int 
+ if isinstance(param, int): + # If it's an integer, use integer comparison + query_sub_string = f'{param_key} == {param}' + else: + # If it's not an integer, treat it as a string + query_sub_string = f'{param_key} == "{param}"' # Add to the query string query_string += f"{query_sub_string}{bound_word}" @@ -90,11 +134,11 @@ def scalable_filters( # Set the filter column and the filter message column, message = filter_params["filter_col"], filter_params["filter_message"] - # Set the list of choices - x = data_zds[column].unique() + # Sort the unique values of the column in ascending order + sorted_values = sorted(data_zds[column].unique(), reverse=True) - # Create the streamlit select box - s = columns[i].selectbox(message, x) + # Create the streamlit select box with sorted values + s = columns[i].selectbox(message, sorted_values) # Show the select box on screen columns[i].write(s) @@ -109,12 +153,25 @@ def scalable_filters( # 1/ Import data # ################## +# Load all regions from the GeoJSON file +regions = gpd.read_file(REGION_GEOJSON_PATH) + # nb dechets : Unused for now df_nb_dechets = pd.read_csv(NB_DECHETS_PATH) # data_zds : main source of data for the hotspots tab data_zds = pd.read_csv(DATA_ZDS_PATH) +# spot: +#spot = pd.read_excel(DATA_SPOT) + +# correction : corrected data for density map +#correction = pd.read_excel(CORRECTION) + +# Fusion and correction +#data_correct = pd.merge(data_zds, correction, on='ID_RELEVE', how='left') +#data_correct = data_correct[data_correct['SURFACE_OK'] == 'OUI'] +#data_zds = data_correct[data_correct['VOLUME_TOTAL'] > 0] ################## # 2/ Hotspot tab # @@ -123,7 +180,6 @@ def scalable_filters( # Tab title st.markdown("""# 🔥 Hotspots : **Quelles sont les zones les plus impactées ?**""") - ################################ # 2.1/ Carte des spots adoptés # ################################ @@ -143,8 +199,9 @@ def plot_adopted_waste_spots( - data_zds: The waste dataframe - filter_dict: dictionary mapping the name of 
the column in the waste df and the value you want to filter by """ - # 1/ Create the waste geodataframe # + print("Filter Dictionary:", filter_dict) # Check the filter dictionary + # 1/ Create the waste geodataframe # # Create a GeoDataFrame for waste points gdf = gpd.GeoDataFrame( data_zds, @@ -154,14 +211,18 @@ def plot_adopted_waste_spots( crs="EPSG:4326", ) + # Convert ANNEE values to integers + if "ANNEE" in filter_dict: + filter_dict["ANNEE"] = int(filter_dict["ANNEE"]) + # Construct the query string query_string = construct_query_string(**filter_dict) + print("Query String:", query_string) # Check the constructed query string # Filter the geodataframe by region and by environment gdf_filtered = gdf.query(query_string) # 2/ Create the regions geodataframe # - # Unpack the region name region = filter_dict["REGION"] @@ -175,7 +236,6 @@ def plot_adopted_waste_spots( raise KeyError(f"Region '{region}' not found.") # 3/ Initialize folium map # - # Initialize a folium map, centered around the mean location of the waste points map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] @@ -193,7 +253,6 @@ def plot_adopted_waste_spots( return # 4/ Add the markers # - # Use MarkerCluster to manage markers if dealing with a large number of points marker_cluster = MarkerCluster().add_to(m) @@ -211,7 +270,6 @@ def plot_adopted_waste_spots( ).add_to(marker_cluster) # 5/ Add the region boundary # - # Add the region boundary to the map for context folium.GeoJson( selected_region, @@ -225,14 +283,203 @@ def plot_adopted_waste_spots( return m -# Construct the map -m = plot_adopted_waste_spots(data_zds, filter_dict, REGION_GEOJSON_PATH) +#################################################################################### +# 2.1/ Tableaux de la densité par milieu et lieu de déchets sur les zones étudiées # +#################################################################################### + +def data_density_lieu_preparation(data): + + # Calculate waste 
volume sum for each 'LIEU' + volume_total_lieu = data.groupby('TYPE_LIEU2')['VOLUME_TOTAL'].sum().reset_index() + + # Remove duplicate data and calculate SURFACE total + data_unique = data.drop_duplicates(subset=['LIEU_COORD_GPS']) + surface_total_lieu = data_unique.groupby('TYPE_LIEU2')['SURFACE'].sum().reset_index() + + # Merge volume and surface data for 'LIEU', calculate density, and sort + data_lieu = pd.merge(volume_total_lieu, surface_total_lieu, on='TYPE_LIEU2') + data_lieu['DENSITE_LIEU'] = (data_lieu['VOLUME_TOTAL'] / data_lieu['SURFACE']).round(5) + data_lieu_sorted = data_lieu.sort_values(by="DENSITE_LIEU", ascending=False) + + return data_lieu_sorted + +def data_density_milieu_preparation(data): + + # Calculate waste volume sum for each 'MILIEU' + volume_total_milieu = data.groupby('TYPE_MILIEU')['VOLUME_TOTAL'].sum().reset_index() + + # Remove duplicate data and calculate SURFACE total + data_unique = data.drop_duplicates(subset=['LIEU_COORD_GPS']) + surface_total_milieu = data_unique.groupby('TYPE_MILIEU')['SURFACE'].sum().reset_index() + + # Merge volume and surface data for 'MILIEU', calculate density, and sort + data_milieu = pd.merge(volume_total_milieu, surface_total_milieu, on='TYPE_MILIEU') + data_milieu['DENSITE_MILIEU'] = (data_milieu['VOLUME_TOTAL'] / data_milieu['SURFACE']).round(5) + data_milieu_sorted = data_milieu.sort_values(by="DENSITE_MILIEU", ascending=False) + + return data_milieu_sorted + +def density_lieu(data_zds: pd.DataFrame, filter_dict: dict): + """ + Calculate and display the density of waste by type of location ('LIEU') for a selected region. 
+ """ + # Get the selected region from filter_dict + selected_region = filter_dict.get("REGION", None) + + if selected_region is not None: + # Filter data for selected region + data_selected_region = data_zds[data_zds['LIEU_REGION'] == selected_region] + + # Apply data preparation function + data_lieu_sorted = data_density_lieu_preparation(data_selected_region) + + # Display sorted DataFrame with specific configuration for 'data_lieu_sorted' + lieu = st.markdown('##### Densité des déchets par type de lieu (L/m2)') + st.dataframe(data_lieu_sorted, + column_order=("TYPE_LIEU2", "DENSITE_LIEU"), + hide_index=True, + width=None, + column_config={ + "TYPE_LIEU2": st.column_config.TextColumn( + "Lieu", + ), + "DENSITE_LIEU": st.column_config.ProgressColumn( + "Densité", + format="%f", + min_value=0, + max_value=max(data_lieu_sorted['DENSITE_LIEU']), + )} + ) + + return lieu + + + +def density_milieu(data_zds: pd.DataFrame, filter_dict: dict): + """ + Calculate and display the density of waste by type of location ('MILIEU') for a selected region. 
+ """ + # Get the selected region from filter_dict + selected_region = filter_dict.get("REGION", None) + + if selected_region is not None: + # Filter data for selected region + data_selected_region = data_zds[data_zds['LIEU_REGION'] == selected_region] + + # Apply data preparation function + data_milieu_sorted = data_density_milieu_preparation(data_selected_region) + + # Display sorted DataFrame with specific configuration for 'data_milieu_sorted' + milieu = st.markdown('##### Densité des déchets par type de milieu (L/m2)') + st.dataframe(data_milieu_sorted, + column_order=("TYPE_MILIEU", "DENSITE_MILIEU"), + hide_index=True, + width=None, + column_config={ + "TYPE_MILIEU": st.column_config.TextColumn( + "Milieu", + ), + "DENSITE_MILIEU": st.column_config.ProgressColumn( + "Densité", + format="%f", + min_value=0, + max_value=max(data_milieu_sorted['DENSITE_MILIEU']), + )} + ) + + return milieu + + +######################################################## +# 2.2/ Carte densité de déchets sur les zones étudiées # +######################################################## + + + + +###################################################### +# 2.3/ Carte choropleth densité de déchets en France # +###################################################### + +def make_density_choropleth(data_zds, region_geojson_path): + # Load all regions from the GeoJSON file + regions_geojson = requests.get(region_geojson_path).json() + + # Data preparation + # Calculate the total VOLUME_TOTAL for each region without removing duplicate data + volume_total_sums = data_zds.groupby('LIEU_REGION')['VOLUME_TOTAL'].sum().reset_index() + + # Merge the waste data and the geographical data + volume_total_sums = pd.merge(regions, volume_total_sums, left_on='nom', right_on='LIEU_REGION', how='left') + + # Remove rows containing NaN + volume_total_sums = volume_total_sums.dropna() + + # Remove duplicate data and calculate SURFACE total + data_unique = data_zds.drop_duplicates(subset=['LIEU_COORD_GPS']) + 
surface_total_sums = data_unique.groupby('LIEU_REGION')['SURFACE'].sum().reset_index() + + # Combine two datasets and calculate DENSITE + data_choropleth_sums = pd.merge(volume_total_sums, surface_total_sums, on='LIEU_REGION') + data_choropleth_sums['DENSITE'] = data_choropleth_sums['VOLUME_TOTAL'] / data_choropleth_sums['SURFACE'] + + # Set bins for the choropleth + min_density = data_choropleth_sums['DENSITE'].min() + max_density = data_choropleth_sums['DENSITE'].max() + + # Create the choropleth map using Plotly Express + choropleth = px.choropleth( + data_choropleth_sums, + geojson=regions_geojson, + featureidkey="properties.nom", + locations='LIEU_REGION', + color='DENSITE', + color_continuous_scale='Reds', + range_color=(min_density, max_density), # set range using log scale + labels={'DENSITE': 'Densité de Déchets(L/m2)'} + ) + + # Update layout to fit the map to the boundaries of the GeoJSON + choropleth.update_layout( + geo=dict( + fitbounds="locations", + visible=False + ), + margin=dict(l=0, r=0, t=0, b=0) + ) + + + return choropleth + +######################## +# Dashboard Main Panel # +######################## + + +st.markdown('### Spots Adoptés') +m = plot_adopted_waste_spots(data_zds, filter_dict, REGION_GEOJSON_PATH) # Show the adopted spots map on the streamlit tab if m: folium_static(m) +col = st.columns((4, 4, 2), gap='medium') -######################################################## -# 2.1/ Carte densité de déchets sur les zones étudiées # -######################################################## +# Construct the map +with col[0]: + + density_lieu(data_zds, filter_dict) + +with col[1]: + + density_milieu(data_zds, filter_dict) + +with col[2]: + with st.expander('Notice ℹ️', expanded=True): + st.write(''' + Explication des diffférences entre Lieu et Milieu + ''') + +st.markdown('### Densité des déchets en France') +choropleth = make_density_choropleth(data_zds, REGION_GEOJSON_PATH) +st.plotly_chart(choropleth, use_container_width=True) From 
49685f6fc2cd585cc1b7220421e0b92c95c52ab9 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Fri, 19 Apr 2024 18:27:27 +0200 Subject: [PATCH 048/147] tg - UI improvement (labels, etc) --- .gitignore | 5 +++- dashboards/app/home.py | 13 +++++++-- dashboards/app/pages/data.py | 52 ++++++++++++++++++++++++++++-------- 3 files changed, 56 insertions(+), 14 deletions(-) diff --git a/.gitignore b/.gitignore index b8fb0eb..67bb6de 100644 --- a/.gitignore +++ b/.gitignore @@ -160,4 +160,7 @@ dmypy.json cython_debug/ # Precommit hooks: ruff cache -.ruff_cache \ No newline at end of file +.ruff_cache + +# Dossier sauvegarde Thibaut +TG_sauv \ No newline at end of file diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 77d4bbd..b9fadb2 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -3,11 +3,20 @@ import pandas as pd import streamlit as st +# Configuration de la page +st.set_page_config( + layout="wide", + page_title="Dashboard Zéro Déchet Sauvage", + page_icon=":dolphin:", + menu_items={ + "About": "https://www.zero-dechet-sauvage.org/", + }, +) # load and apply CSS styles def load_css(file_name: str) -> None: - with Path.open(file_name) as f: - st.markdown(f"", unsafe_allow_html=True) + with Path(file_name).open() as f: + st.markdown(f"", unsafe_allow_html=True) # Load and apply the CSS file at the start of your app diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 6b3de88..8102b21 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -196,8 +196,12 @@ def load_df_dict_corr_dechet_materiau(): color_discrete_map=colors_map, ) - # Amélioration de l'affichage - fig.update_traces(textinfo="percent") + # Réglage du texte affiché, format et taille de police + fig.update_traces( + textinfo="percent", + texttemplate="%{percent:.0%}", + textfont_size=14, + ) fig.update_layout(autosize=True, legend_title_text="Matériau") # Affichage du graphique 
@@ -216,13 +220,20 @@ def load_df_dict_corr_dechet_materiau(): ) # Amélioration du graphique - fig2.update_traces(texttemplate="%{text:.2s}", textposition="inside") + fig2.update_traces( + texttemplate="%{text:.2s}", + textposition="inside", + textfont_size=14, + ) fig2.update_layout( autosize=True, - uniformtext_minsize=8, + # uniformtext_minsize=8, uniformtext_mode="hide", - xaxis_tickangle=90, + xaxis_tickangle=-45, showlegend=False, + yaxis_showgrid=False, + xaxis_title=None, + yaxis_title=None, ) # Affichage du graphique @@ -252,17 +263,28 @@ def load_df_dict_corr_dechet_materiau(): barnorm="percent", title="Part de chaque matériau en volume selon le milieu de collecte", color_discrete_map=colors_map, + text_auto=True, + ) + # Format d'affichage + fig3.update_layout( + bargap=0.2, + height=600, + yaxis_title="Part du volume collecté (en %)", + xaxis_title=None, + ) + fig3.update_xaxes(tickangle=-30) + # Etiquettes et formats de nombres + fig3.update_traces( + texttemplate="%{y:.0f}%", + textposition="inside", + hovertemplate="%{x}
Part du volume collecté dans ce milieu: %{y:.0f} %", + textfont_size=14, ) - fig3.update_layout(bargap=0.2, height=500) - fig3.update_layout(yaxis_title="% du volume collecté", xaxis_title=None) - fig3.update_xaxes(tickangle=-45) # Afficher le graphique with st.container(border=True): st.plotly_chart(fig3, use_container_width=True) - st.divider() - # Ligne 3 : Graphe par milieu , lieu et année st.write("**Détail par milieu, lieu ou année**") @@ -455,6 +477,11 @@ def load_df_dict_corr_dechet_materiau(): # Grouper par type de matériau pour les visualisations df_totals_sorted2 = df_volume2.groupby(["Matériau"], as_index=False)["Volume"].sum() df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) + df_totals_sorted2["Volume_"] = ( + df_totals_sorted2["Volume"] + .apply(lambda x: "{0:,.0f}".format(x)) + .replace(",", " ") + ) # Étape 4: Création du Graphique @@ -472,8 +499,11 @@ def load_df_dict_corr_dechet_materiau(): ) fig4.update_traces( textinfo="label+value", - textfont=dict(size=16, family="Arial", color="black"), + texttemplate="%{label}
%{value:.0f} litres", + textfont=dict(size=16), + hovertemplate="%{label}
Volume: %{value:.0f}", ) + with st.container(border=True): st.plotly_chart(fig4, use_container_width=True) From 98d7050d02c9a429d91907ae38d8ab27142413da Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 19 Apr 2024 15:59:20 -0400 Subject: [PATCH 049/147] Revert "Merge branch 'staging' into 7-onglet-data" This reverts commit 434b8115ee4701c6c8faec35958994c4122ea255, reversing changes made to 49685f6fc2cd585cc1b7220421e0b92c95c52ab9. --- .gitignore | 5 +- dashboards/app/home.py | 237 ++-- dashboards/app/pages/actions.py | 361 +----- dashboards/app/pages/data.py | 1913 ++++++++++++++---------------- dashboards/app/pages/register.py | 47 - dashboards/app/requirements.txt | 3 - 6 files changed, 992 insertions(+), 1574 deletions(-) delete mode 100644 dashboards/app/pages/register.py diff --git a/.gitignore b/.gitignore index 4e6eb37..67bb6de 100644 --- a/.gitignore +++ b/.gitignore @@ -163,7 +163,4 @@ cython_debug/ .ruff_cache # Dossier sauvegarde Thibaut -TG_sauv - -# Streamlit: credentials -dashboards/app/.credentials.yml \ No newline at end of file +TG_sauv \ No newline at end of file diff --git a/dashboards/app/home.py b/dashboards/app/home.py index f052a2b..b9fadb2 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -2,10 +2,6 @@ import pandas as pd import streamlit as st -import streamlit_authenticator as stauth -import yaml -from st_pages import Page, show_pages -from yaml.loader import SafeLoader # Configuration de la page st.set_page_config( @@ -35,137 +31,106 @@ def load_css(file_name: str) -> None: """, ) -# Login -p_cred = Path(".credentials.yml") -with p_cred.open() as file: - config = yaml.load(file, Loader=SafeLoader) - -authenticator = stauth.Authenticate( - config["credentials"], - config["cookie"]["name"], - config["cookie"]["key"], - config["cookie"]["expiry_days"], - config["pre-authorized"], -) -authenticator.login( - fields={ - "Form name": "Connexion", - "Username": "Identifiant", - "Password": "Mot de passe", - "Login": 
"Connexion", - }, +st.markdown("""# À propos""") + + +# Chargement des données et filtre géographique à l'arrivée sur le dashboard +# Table des volumes par matériaux +@st.cache_data +def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + +# Table du nb de déchets +@st.cache_data +def load_df_nb_dechet() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv", + ) + + +# Appel des fonctions pour charger les données + +df_other = load_df_other() +df_nb_dechets = load_df_nb_dechet() + + +# Création du filtre par niveau géographique : correspondance labels et variables du dataframe +niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", +} + +# 1ère étape : sélection du niveau administratif concerné (région, dép...) 
+# Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment +# Récupérer les index pour conserver la valeur des filtres au changement de pages +# Filtre niveau administratif +niveau_admin = st.session_state.get("niveau_admin", None) +index_admin = st.session_state.get("index_admin", None) +# Filtre collectivité +collectivite = st.session_state.get("collectivite", None) +index_collec = st.session_state.get("index_collec", None) + +# Initialiser la selectbox avec l'index récupéré +select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=index_admin, ) +if select_niveauadmin is not None: + # Filtrer la liste des collectivités en fonction du niveau admin + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() -if st.session_state["authentication_status"]: - show_pages( - [ - Page("home.py", "Accueil", "🏠"), - Page("pages/actions.py", "Actions", "👊"), - Page("pages/data.py", "Data", "🔍"), - Page("pages/hotspots.py", "Hotspots", "🔥"), - Page("pages/structures.py", "Structures", "🔭"), - ], + # 2ème filtre : sélection de la collectivité concernée + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=index_collec, ) - st.markdown("""# À propos""") - - # Chargement des données et filtre géographique à l'arrivée sur le dashboard - # Table des volumes par matériaux - @st.cache_data - def load_df_other() -> pd.DataFrame: - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv", - ) - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE - # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + 
df["commune"] - return df - - - # Table du nb de déchets - @st.cache_data - def load_df_nb_dechet() -> pd.DataFrame: - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv", - ) - - - # Appel des fonctions pour charger les données - - df_other = load_df_other() - df_nb_dechets = load_df_nb_dechet() - - - # Création du filtre par niveau géographique : correspondance labels et variables du dataframe - niveaux_admin_dict = { - "Région": "REGION", - "Département": "DEP_CODE_NOM", - "EPCI": "LIBEPCI", - "Commune": "COMMUNE_CODE_NOM", - } - - # 1ère étape : sélection du niveau administratif concerné (région, dép...) - # Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment - # Récupérer les index pour conserver la valeur des filtres au changement de pages - # Filtre niveau administratif - niveau_admin = st.session_state.get("niveau_admin", None) - index_admin = st.session_state.get("index_admin", None) - # Filtre collectivité - collectivite = st.session_state.get("collectivite", None) - index_collec = st.session_state.get("index_collec", None) - - # Initialiser la selectbox avec l'index récupéré - select_niveauadmin = st.selectbox( - "Niveau administratif : ", - niveaux_admin_dict.keys(), - index=index_admin, + + +if st.button("Enregistrer la sélection"): + # Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, ) - if select_niveauadmin is not None: - # Filtrer la liste des collectivités en fonction du niveau admin - liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] - liste_collectivites = liste_collectivites.sort_values().unique() - - # 2ème filtre : sélection de la collectivité concernée - select_collectivite = 
st.selectbox( - "Collectivité : ", - liste_collectivites, - index=index_collec, - ) - - - if st.button("Enregistrer la sélection"): - # Enregistrer les valeurs sélectionnées dans le session.state - st.session_state["niveau_admin"] = select_niveauadmin - st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( - select_niveauadmin, - ) - - st.session_state["collectivite"] = select_collectivite - st.session_state["index_collec"] = list(liste_collectivites).index( - select_collectivite, - ) - - # Afficher la collectivité sélectionnée - st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") - - # Filtrer et enregistrer le DataFrame dans un session state pour la suite - colonne_filtre = niveaux_admin_dict[select_niveauadmin] - df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] - st.session_state["df_other_filtre"] = df_other_filtre - - # Filtrer et enregistrer le dataframe nb_dechets dans session.State - # Récuperer la liste des relevés - id_releves = df_other_filtre["ID_RELEVE"].unique() - # Filtrer df_nb_dechets sur la liste des relevés - st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ - df_nb_dechets["ID_RELEVE"].isin(id_releves) - ] + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + + # Filtrer et enregistrer le DataFrame dans un session state pour la suite + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] + st.session_state["df_other_filtre"] = df_other_filtre + + # Filtrer et enregistrer le dataframe nb_dechets dans session.State + # Récuperer la liste des relevés + id_releves = df_other_filtre["ID_RELEVE"].unique() + # Filtrer df_nb_dechets sur la liste des relevés + 
st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ + df_nb_dechets["ID_RELEVE"].isin(id_releves) + ] # Afficher le nombre de relevés disponibles nb_releves = len(st.session_state["df_other_filtre"]) @@ -173,17 +138,3 @@ def load_df_nb_dechet() -> pd.DataFrame: f"{nb_releves} relevés de collecte sont disponibles \ pour l'analyse sur votre territoire.", ) - - authenticator.logout() - -elif st.session_state["authentication_status"] is False: - st.error("Mauvais identifiants ou mot de passe.") -elif st.session_state["authentication_status"] is None: - st.warning("Veuillez entrer votre identifiant et mot de passe") - - show_pages( - [ - Page("home.py", "Home", "🏠 "), - Page("pages/register.py", "S'enregistrer", "🚀"), - ], - ) \ No newline at end of file diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index 6f6d3bf..208b092 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -1,344 +1,49 @@ -import pandas as pd -from datetime import datetime, timedelta -import plotly.express as px import streamlit as st -import folium +import altair as alt +import pandas as pd +import duckdb -# Page setting : wide layout -st.set_page_config( - layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Actions" +st.markdown( + """# 👊 Actions +*Quels sont les actions mises en place par les acteurs ?* +""" ) -# Session state -session_state = st.session_state - -# Récupérer les filtres géographiques s'ils ont été fixés -filtre_niveau = st.session_state.get("niveau_admin", "") -filtre_collectivite = st.session_state.get("collectivite", "") - -# Définition d'une fonction pour charger les données du nombre de déchets -@st.cache_data -def load_df_dict_corr_dechet_materiau(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" - "chet_groupe_materiau.csv" - ) - - -@st.cache_data -def load_df_nb_dechet(): - 
return pd.read_csv( +df_nb_dechet = pd.read_csv( + ( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" "sation/data/data_releve_nb_dechet.csv" ) +) - -# Définition d'une fonction pour charger les autres données -@st.cache_data -def load_df_other(): - df = pd.read_csv( +df_other = pd.read_csv( + ( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" "sation/data/data_zds_enriched.csv" ) - - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - - return df - - -# Appel des fonctions pour charger les données -df_nb_dechet = load_df_nb_dechet() -# df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() - -# Appeler le dataframe filtré depuis le session state -if "df_other_filtre" in st.session_state: - df_other = st.session_state["df_other_filtre"].copy() -else: - df_other = load_df_other() - -# Titre de l'onglet -st.markdown( - """# 🔎 Actions -Quels sont les actions mises en place par les acteurs ? 
-""" ) -# 2 Onglets : Evènements, Evènements à venir -tab1, tab2 = st.tabs( - [ - "Evènements", - "Evènements à venir", - ] +res_aggCategory_filGroup = duckdb.query( + ( + "SELECT categorie, sum(nb_dechet) AS total_dechet " + "FROM df_nb_dechet " + "WHERE type_regroupement = 'GROUPE' " + "GROUP BY categorie " + "HAVING sum(nb_dechet) > 10000 " + "ORDER BY total_dechet DESC;" + ) +).to_df() + +# st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") + +st.altair_chart( + alt.Chart(res_aggCategory_filGroup) + .mark_bar() + .encode( + x=alt.X("categorie", sort=None, title=""), + y=alt.Y("total_dechet", title="Total de déchet"), + ), + use_container_width=True, ) - -# Onglet 1 : Evènements -with tab1: - if filtre_niveau == "" and filtre_collectivite == "": - st.write( - "Aucune sélection de territoire n'ayant été effectuée les données sont globales" - ) - else: - st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") - - #################### - # @Valerie : J'ai comment pour éviter les errreur - # Les DF sont chargés au dessus comme dans l'onglet DATA - # Je n'ai pas trouvé de référence à 'df_nb_dechets_filtre' dans l'onglet DATA - #################### - - # Appeler les dataframes volumes et nb_dechets filtré depuis le session state - # if ("df_other_filtre" not in st.session_state) or ( - # "df_nb_dechets_filtre" not in st.session_state - # ): - # st.write( - # """ - # ### :warning: Merci de sélectionner une collectivité\ - # dans l'onglet Home pour afficher les données. 
:warning: - # """ - # ) - - # df_nb_dechet = pd.read_csv( - # ( - # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - # "sation/data/data_releve_nb_dechet.csv" - # ) - # ) - - # df_other = pd.read_csv( - # ( - # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - # "sation/data/data_zds_enriched.csv" - # ) - # ) - - # else: - # df_other = st.session_state["df_other_filtre"].copy() - # df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() - - # Copier le df pour la partie filtrée par milieu/lieu/année - df_other_metrics_raw = df_other.copy() - - annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - - # Filtre par année: - options = ["Aucune sélection"] + list(df_other["ANNEE"].unique()) - annee_choisie = st.selectbox("Choisissez l'année:", options, index=0) - - if annee_choisie == "Aucune sélection": - df_other_filtre = df_other.copy() - - if annee_choisie != "Aucune sélection": - df_other_filtre = df_other[df_other["ANNEE"] == annee_choisie].copy() - - # Copie des données pour transfo - df_events = df_other_filtre.copy() - - # Calcul des indicateurs clés de haut de tableau avant transformation - volume_total = df_events["VOLUME_TOTAL"].sum() - poids_total = df_events["POIDS_TOTAL"].sum() - nombre_participants = df_events["NB_PARTICIPANTS"].sum() - nb_collectes = len(df_events) - nombre_structures = df_events["ID_STRUCTURE"].nunique() - - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - - # 1ère métrique : nombre de relevés - cell1 = l1_col1.container(border=True) - nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") - cell1.metric("Nombre de collectes réalisées", f"{nb_collectes}") - - # 2ème métrique : Nombre de Participants - cell2 = l1_col2.container(border=True) - nombre_participants = 
f"{nombre_participants:,.0f}".replace(",", " ") - cell2.metric("Nombre de participants", f"{nombre_participants}") - - # 3ème métrique : Nombre de Structures - cell3 = l1_col3.container(border=True) - nombre_structures = f"{nombre_structures:,.0f}".replace(",", " ") - cell3.metric("Nombre de structures", f"{nombre_structures}") - - # Ligne 2 : Carte - with st.container(): - # Création du DataFrame de travail pour la carte - df_map_evnenements = df_other_filtre.copy() - # Création de la carte centrée autour d'une localisation - # Calcul des limites à partir de vos données - min_lat = df_map_evnenements["LIEU_COORD_GPS_Y"].min() - max_lat = df_map_evnenements["LIEU_COORD_GPS_Y"].max() - min_lon = df_map_evnenements["LIEU_COORD_GPS_X"].min() - max_lon = df_map_evnenements["LIEU_COORD_GPS_X"].max() - - map_evenements = folium.Map( - location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], - zoom_start=8, - tiles="OpenStreetMap", - ) - # Facteur de normalisation pour ajuster la taille des bulles - normalisation_facteur = 100 - for index, row in df_map_evnenements.iterrows(): - # Application de la normalisation - radius = row["NB_PARTICIPANTS"] / normalisation_facteur - - # Application d'une limite minimale pour le rayon si nécessaire - radius = max(radius, 5) - - folium.CircleMarker( - location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), - radius=radius, # Utilisation du rayon ajusté - popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['NOM_EVENEMENT']}, {row['DATE']} : nombre de participants : {row['NB_PARTICIPANTS']}", - color="#3186cc", - fill=True, - fill_color="#3186cc", - ).add_to(map_evenements) - - # Affichage de la carte Folium dans Streamlit - st_folium = st.components.v1.html - st_folium( - folium.Figure().add_child(map_evenements).render(), # , width=1400 - height=750, - ) - - # Ligne 3 : 1 graphique donut chart et un graphique barplot horizontal nombre de relevés par types de milieux - # préparation du dataframe et figure niveaux de 
caracterisation - - df_carac = df_other_filtre.copy() - df_carac_counts = df_carac["NIVEAU_CARAC"].value_counts().reset_index() - df_carac_counts.columns = ["NIVEAU_CARAC", "counts"] - - fig1_actions = px.pie( - df_carac_counts, - values="counts", - names="NIVEAU_CARAC", - title="Répartition des niveaux de caractérisation", - hole=0.5, - ) - fig1_actions.update_traces(textposition="inside", textinfo="percent+label") - - # préparation du dataframe et figure releves types de milieux - - df_milieux = df_other_filtre.copy() - df_milieux_counts = df_milieux["TYPE_MILIEU"].value_counts().reset_index() - df_milieux_counts.columns = ["TYPE_MILIEU", "counts"] - df_milieux_counts_sorted = df_milieux_counts.sort_values( - by="counts", ascending=True - ) - - fig2_actions = px.bar( - df_milieux_counts_sorted, - y="TYPE_MILIEU", - x="counts", - title="Nombre de relevés par types de milieux", - text="counts", - orientation="h", - ) - - l3_col1, l3_col2 = st.columns(2) - cell4 = l3_col1.container(border=True) - cell5 = l3_col2.container(border=True) - - # Affichage donut - with cell4: - st.plotly_chart(fig1_actions, use_container_width=True) - - # Affichage barplot - with cell5: - st.plotly_chart(fig2_actions, use_container_width=True) - - # Ligne 3 : 2 graphiques en ligne : carte relevés et bar chart matériaux - l3_col1, l3_col2 = st.columns(2) - cell6 = l3_col1.container(border=True) - cell7 = l3_col2.container(border=True) - - # Ligne 4 : 2 graphiques en ligne : bar chart milieux et bar chart types déchets - l4_col1, l4_col2 = st.columns(2) - cell8 = l4_col1.container(border=True) - cell9 = l4_col2.container(border=True) - - # Ligne 5 : 2 graphiques en ligne : line chart volume + nb collectes et Pie niveau de caractérisation - l5_col1, l5_col2 = st.columns(2) - cell10 = l5_col1.container(border=True) - cell11 = l5_col2.container(border=True) - - -# onglet Evenements a venir -with tab2: - st.write(f"Votre territoire : Pays - France") - - # Définition d'une fonction pour charger 
les evenements à venir - @st.cache_data - def load_df_events_clean() -> pd.DataFrame: - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/export_events_cleaned.csv" - ) - - # Appel des fonctions pour charger les données - df_events = load_df_events_clean() - - df_events.DATE = pd.to_datetime(df_events.DATE) - - # Filtrer les événements à venir - df_events_a_venir = df_events[df_events.DATE > (datetime.now() - timedelta(days=5))] - - # Trie les events par date - df_events_a_venir.sort_values(by="DATE", inplace=True) - - # Coord approximatives du centre de la France - coord_centre_france = [46.603354, 1.888334] - - # Code couleurs de ZDS - color_ZDS_bleu = "#003463" - color_ZDS_rouge = "#e9003f" - - # Créer la carte - map_events = folium.Map( - location=coord_centre_france, - zoom_start=6, - ) - - # Ajouter des marqueurs pour chaque événement à venir sur la carte - for idx, row in df_events_a_venir.iterrows(): - folium.Marker( - location=[row.COORD_GPS_Y, row.COORD_GPS_X], - popup=folium.Popup(row.NOM_EVENEMENT, lazy=False), - # tooltip=row.NOM_EVENEMENT, - # icon=folium.Icon(icon_color=color_ZDS_bleu) - ).add_to(map_events) - - # Afficher la liste des événements à venir avec la date affichée avant le nom - st.subheader("Actions à venir :") - - with st.container(height=500, border=False): - for idx, row in df_events_a_venir.iterrows(): - with st.container(border=True): - # Bloc contenant la date - date_block = f"
{row.DATE.day}
{row.DATE.strftime('%b')}
" - # Bloc contenant le nom de l'événement - event_block = ( - f"
{row.NOM_EVENEMENT}
" - ) - # Bloc contenant le type d'événement et le nom de la structure - type_structure_block = f"{row.TYPE_EVENEMENT} | {row.NOM_STRUCTURE}" - - # Ajout de chaque événement dans la liste - st.write( - f"
{date_block}
{event_block}{type_structure_block}
", - unsafe_allow_html=True, - ) - - # Afficher la carte avec Streamlit - st_folium = st.components.v1.html - st_folium( - folium.Figure().add_child(map_events).render(), - width=800, - height=800, - ) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 86d35f7..8102b21 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -23,1114 +23,929 @@ """ ) -if st.session_state["authentication_status"]: - - if filtre_niveau == "" and filtre_collectivite == "": - st.write("Aucune sélection de territoire n'a été effectuée") - else: - st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") +if filtre_niveau == "" and filtre_collectivite == "": + st.write("Aucune sélection de territoire n'a été effectuée") +else: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") - # Définition d'une fonction pour charger les données du nombre de déchets@st.cache_data - def load_df_dict_corr_dechet_materiau(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" - "chet_groupe_materiau.csv" - ) +# Définition d'une fonction pour charger les données du nombre de déchets@st.cache_data +def load_df_dict_corr_dechet_materiau(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" + "chet_groupe_materiau.csv" + ) - # Appel des fonctions pour charger les données - df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() +# Appel des fonctions pour charger les données +df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() - # Appeler les dataframes volumes et nb_dechets filtré depuis le session state - if ("df_other_filtre" not in st.session_state) or ( - "df_nb_dechets_filtre" not in st.session_state - ): - st.write( +# Appeler les dataframes volumes et nb_dechets filtré depuis le 
session state +if ("df_other_filtre" not in st.session_state) or ( + "df_nb_dechets_filtre" not in st.session_state +): + st.write( + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home pour afficher les données. :warning: """ - ### :warning: Merci de sélectionner une collectivité\ - dans l'onglet Home pour afficher les données. :warning: - """ - ) - st.stop() - else: - df_other = st.session_state["df_other_filtre"].copy() - df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() - - # Copier le df pour la partie filtrée par milieu/lieu/année - df_other_metrics_raw = df_other.copy() - - - # 3 Onglets : Matériaux, Top déchets, Filières et marques - tab1, tab2, tab3 = st.tabs( - [ - "Matériaux :wood:", - "Top Déchets :wastebasket:", - "Secteurs et marques :womans_clothes:", - ] ) + st.stop() +else: + df_other = st.session_state["df_other_filtre"].copy() + df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() - milieu_lieu_dict = ( - df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] - .unique() - .apply(lambda x: x.tolist()) - .to_dict() - ) +# Copier le df pour la partie filtrée par milieu/lieu/année +df_other_metrics_raw = df_other.copy() - annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - - # Onglet 1 : Matériaux - with tab1: - - # Transformation du dataframe pour les graphiques - # Variables à conserver en ligne - cols_identifiers = [ - "ANNEE", - "TYPE_MILIEU", - "INSEE_COM", - "DEP", - "REG", - "EPCI", - "BV2022", - ] - # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau - cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] +# 3 Onglets : Matériaux, Top déchets, Filières et marques +tab1, tab2, tab3 = st.tabs( + [ + "Matériaux :wood:", + "Top Déchets :wastebasket:", + "Secteurs et marques :womans_clothes:", + ] +) - # Copie des données pour transfo - df_volume = df_other.copy() +milieu_lieu_dict = ( + 
df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] + .unique() + .apply(lambda x: x.tolist()) + .to_dict() +) - # Calcul des indicateurs clés de haut de tableau avant transformation - volume_total = df_volume["VOLUME_TOTAL"].sum() - poids_total = df_volume["POIDS_TOTAL"].sum() - volume_total_categorise = df_volume[cols_volume].sum().sum() - pct_volume_categorise = volume_total_categorise / volume_total - nb_collectes_int = len(df_volume) +annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) + +# Onglet 1 : Matériaux +with tab1: + + # Transformation du dataframe pour les graphiques + # Variables à conserver en ligne + cols_identifiers = [ + "ANNEE", + "TYPE_MILIEU", + "INSEE_COM", + "DEP", + "REG", + "EPCI", + "BV2022", + ] + + # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau + cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] + + # Copie des données pour transfo + df_volume = df_other.copy() + + # Calcul des indicateurs clés de haut de tableau avant transformation + volume_total = df_volume["VOLUME_TOTAL"].sum() + poids_total = df_volume["POIDS_TOTAL"].sum() + volume_total_categorise = df_volume[cols_volume].sum().sum() + pct_volume_categorise = volume_total_categorise / volume_total + nb_collectes_int = len(df_volume) + + # estimation du poids categorisée en utilisant pct_volume_categorise + poids_total_categorise = round(poids_total * pct_volume_categorise) + + # Dépivotage du tableau pour avoir une base de données exploitable + df_volume = df_volume.melt( + id_vars=cols_identifiers, + value_vars=cols_volume, + var_name="Matériau", + value_name="Volume", + ) - # estimation du poids categorisée en utilisant pct_volume_categorise - poids_total_categorise = round(poids_total * pct_volume_categorise) + # Nettoyer le nom du Type déchet pour le rendre plus lisible + df_volume["Matériau"] = ( + df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() + ) - # Dépivotage du tableau 
pour avoir une base de données exploitable - df_volume = df_volume.melt( - id_vars=cols_identifiers, - value_vars=cols_volume, - var_name="Matériau", - value_name="Volume", + # Grouper par type de matériau pour les visualisations + df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)["Volume"].sum() + df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) + + # Charte graphique MERTERRE : + colors_map = { + "Textile": "#C384B1", + "Papier": "#CAA674", + "Metal": "#A0A0A0", + "Verre": "#3DCE89", + "Autre": "#F3B900", + "Plastique": "#48BEF0", + "Caoutchouc": "#364E74", + "Bois": "#673C11", + "Papier/Carton": "#CAA674", + "Métal": "#A0A0A0", + "Verre/Céramique": "#3DCE89", + "Autre": "#F3B900", + } + + # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + volume_total = f"{volume_total:,.0f}".replace(",", " ") + cell1.metric("Volume de déchets collectés", f"{volume_total} litres") + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + poids_total = f"{poids_total:,.0f}".replace(",", " ") + + cell2.metric("Poids total collecté", f"{poids_total} kg") + + # 3ème métrique : nombre de relevés + cell3 = l1_col3.container(border=True) + nb_collectes = f"{nb_collectes_int:,.0f}".replace(",", " ") + cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") + + # Message d'avertissement nb de collectes en dessous de 5 + if nb_collectes_int == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(nb_collectes_int) + + " collecte considérées dans les données présentées." 
) - - # Nettoyer le nom du Type déchet pour le rendre plus lisible - df_volume["Matériau"] = ( - df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() + elif nb_collectes_int <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(nb_collectes_int) + + " collectes considérées dans les données présentées." ) - # Grouper par type de matériau pour les visualisations - df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)["Volume"].sum() - df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) - - # Charte graphique MERTERRE : - colors_map = { - "Textile": "#C384B1", - "Papier": "#CAA674", - "Metal": "#A0A0A0", - "Verre": "#3DCE89", - "Autre": "#F3B900", - "Plastique": "#48BEF0", - "Caoutchouc": "#364E74", - "Bois": "#673C11", - "Papier/Carton": "#CAA674", - "Métal": "#A0A0A0", - "Verre/Céramique": "#3DCE89", - "Autre": "#F3B900", - } - - # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - volume_total = f"{volume_total:,.0f}".replace(",", " ") - cell1.metric("Volume de déchets collectés", f"{volume_total} litres") - - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - poids_total = f"{poids_total:,.0f}".replace(",", " ") - - cell2.metric("Poids total collecté", f"{poids_total} kg") - - # 3ème métrique : nombre de relevés - cell3 = l1_col3.container(border=True) - nb_collectes = f"{nb_collectes_int:,.0f}".replace(",", " ") - cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") - - # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(nb_collectes_int) - + " collecte considérées dans les données 
présentées." - ) - elif nb_collectes_int <= 5: - st.warning( - "⚠️ Il n'y a que " - + str(nb_collectes_int) - + " collectes considérées dans les données présentées." - ) - - # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux - - l2_col1, l2_col2 = st.columns(2) - cell4 = l2_col1.container(border=True) - cell5 = l2_col2.container(border=True) - with cell4: - - # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance - fig = px.pie( - df_totals_sorted, - values="Volume", - names="Matériau", - title="Répartition des matériaux en volume", - hole=0.4, - color="Matériau", - color_discrete_map=colors_map, - ) - - # Réglage du texte affiché, format et taille de police - fig.update_traces( - textinfo="percent", - texttemplate="%{percent:.0%}", - textfont_size=14, - ) - fig.update_layout(autosize=True, legend_title_text="Matériau") - - # Affichage du graphique - st.plotly_chart(fig, use_container_width=True) - - with cell5: - # Création du graphique en barres avec Plotly Express - fig2 = px.bar( - df_totals_sorted, - x="Matériau", - y="Volume", - text="Volume", - title="Volume total par materiau (en litres)", - color="Matériau", - color_discrete_map=colors_map, - ) - - # Amélioration du graphique - fig2.update_traces( - texttemplate="%{text:.2s}", - textposition="inside", - textfont_size=14, - ) - fig2.update_layout( - autosize=True, - # uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=-45, - showlegend=False, - yaxis_showgrid=False, - xaxis_title=None, - yaxis_title=None, - ) - - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux - st.write("") - st.caption( - f"Note : Cette analyse se base sur les déchets qui ont 
pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." + l2_col1, l2_col2 = st.columns(2) + cell4 = l2_col1.container(border=True) + cell5 = l2_col2.container(border=True) + with cell4: + + # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance + fig = px.pie( + df_totals_sorted, + values="Volume", + names="Matériau", + title="Répartition des matériaux en volume", + hole=0.4, + color="Matériau", + color_discrete_map=colors_map, ) - # Ligne 3 : Graphe par milieu de collecte - - # Appeler le dataframe filtré depuis le session state - if "df_other" in st.session_state: - df_other = st.session_state["df_other"].copy() - else: - df_other = load_df_other() - - # 3 Onglets : Matériaux, Top déchets, Filières et marques - tab1, tab2, tab3 = st.tabs( - [ - "Matériaux :wood:", - "Top Déchets :wastebasket:", - "Secteurs et marques :womans_clothes:", - ] + # Réglage du texte affiché, format et taille de police + fig.update_traces( + textinfo="percent", + texttemplate="%{percent:.0%}", + textfont_size=14, ) + fig.update_layout(autosize=True, legend_title_text="Matériau") + + # Affichage du graphique + st.plotly_chart(fig, use_container_width=True) - # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau - fig3 = px.histogram( - df_typemilieu, - x="TYPE_MILIEU", + with cell5: + # Création du graphique en barres avec Plotly Express + fig2 = px.bar( + df_totals_sorted, + x="Matériau", y="Volume", + text="Volume", + title="Volume total par materiau (en litres)", color="Matériau", - barnorm="percent", - title="Part de chaque matériau en volume selon le milieu de collecte", color_discrete_map=colors_map, - text_auto=True, - ) - # Format d'affichage - fig3.update_layout( - bargap=0.2, - height=600, - yaxis_title="Part du volume collecté (en %)", - xaxis_title=None, ) - fig3.update_xaxes(tickangle=-30) - # Etiquettes et formats de nombres 
- fig3.update_traces( - texttemplate="%{y:.0f}%", + + # Amélioration du graphique + fig2.update_traces( + texttemplate="%{text:.2s}", textposition="inside", - hovertemplate="%{x}
Part du volume collecté dans ce milieu: %{y:.0f} %", textfont_size=14, ) + fig2.update_layout( + autosize=True, + # uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=-45, + showlegend=False, + yaxis_showgrid=False, + xaxis_title=None, + yaxis_title=None, + ) - # Afficher le graphique - with st.container(border=True): - st.plotly_chart(fig3, use_container_width=True) + # Affichage du graphique + st.plotly_chart(fig2, use_container_width=True) - # Ligne 3 : Graphe par milieu , lieu et année - st.write("**Détail par milieu, lieu ou année**") + st.write("") + st.caption( + f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." + ) - # Étape 1: Création des filtres + # Ligne 3 : Graphe par milieu de collecte - df_other_metrics = df_other_metrics_raw.copy() - df_other_metrics = df_other_metrics.fillna(0) + # Grouper par année et type de matériau + df_typemilieu = df_volume.groupby(["TYPE_MILIEU", "Matériau"], as_index=False)[ + "Volume" + ].sum() + df_typemilieu = df_typemilieu.sort_values( + ["TYPE_MILIEU", "Volume"], ascending=False + ) - selected_annee = st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), - ) - if selected_annee != "Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee].copy() - filtered_metrics_milieu = df_other_metrics[ - df_other_metrics["ANNEE"] == selected_annee - ].copy() - else: - filtered_data_milieu = df_other.copy() - filtered_metrics_milieu = df_other_metrics.copy() - - # Onglet 1 : Matériaux - with tab1: - - # Transformation du dataframe pour les graphiques - # Variables à conserver en ligne - cols_identifiers = [ - "ANNEE", - "TYPE_MILIEU", - "INSEE_COM", - "DEP", - "REG", - "EPCI", - "BV2022", - ] - filtered_metrics_milieu = filtered_metrics_milieu[ - filtered_metrics_milieu["TYPE_MILIEU"] == 
selected_type_milieu - ] - else: - filtered_data_lieu = filtered_data_milieu.copy() - filtered_metrics_milieu = df_other_metrics.copy() - - # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau - cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] - - if ( - selected_annee == "Aucune sélection" - and selected_type_milieu == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - ): - df_filtered = df_other.copy() - df_filtered_metrics = df_other_metrics_raw.copy() - elif ( - selected_type_milieu == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["ANNEE"] == selected_annee - ].copy() - elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - and selected_type_milieu != "Aucune sélection" - ): - df_filtered = df_other[df_other["TYPE_MILIEU"] == selected_type_milieu].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["TYPE_MILIEU"] == selected_type_milieu - ].copy() - - elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu == "Aucune sélection" - ): - df_filtered = df_other[df_other["TYPE_LIEU"] == selected_type_lieu].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["TYPE_LIEU"] == selected_type_lieu - ].copy() - - elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu != "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["TYPE_LIEU"] == selected_type_lieu) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - ] - elif ( - selected_annee != "Aucune 
sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu == "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_LIEU"] == selected_type_lieu) - ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - ] - elif ( - selected_annee != "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - and selected_type_milieu != "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - ] - - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - & (df_other["TYPE_LIEU"] == selected_type_lieu) - ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - ] - - # Ligne 5 : Metriques filtrés - l5_col1, l5_col2, l5_col3 = st.columns(3) - cell6 = l5_col1.container(border=True) - cell7 = l5_col2.container(border=True) - cell8 = l5_col3.container(border=True) - - poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() - volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() - - volume_total_filtered = f"{volume_total_filtered:,.0f}".replace(",", " ") - cell6.metric("Volume de dechets collectés", f"{volume_total_filtered} litres") - - poids_total_filtered = f"{poids_total_filtered:,.0f}".replace(",", " ") - cell7.metric("Poids total collecté", f"{poids_total_filtered} kg") - - nombre_collectes_filtered = f"{len(df_filtered):,.0f}".replace(",", " ") - cell8.metric("Nombre de collectes", 
f"{nombre_collectes_filtered}") - - # Message d'avertissement nb de collectes en dessous de 5 - if len(df_filtered) == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(len(df_filtered)) - + " collecte considérées dans les données présentées." - ) - elif len(df_filtered) <= 5: - st.warning( - "⚠️ Il n'y a que " - + str(len(df_filtered)) - + " collectes considérées dans les données présentées." - ) - - # Étape 3: Preparation dataframe pour graphe - # Copie des données pour transfo - df_volume2 = df_filtered.copy() - - # Calcul des indicateurs clés de haut de tableau avant transformation - volume2_total = df_volume2["VOLUME_TOTAL"].sum() - poids2_total = df_volume2["POIDS_TOTAL"].sum() - volume2_total_categorise = df_volume2[cols_volume].sum().sum() - pct_volume2_categorise = volume2_total_categorise / volume2_total - nb_collectes2 = len(df_volume2) - - # estimation du poids categorisée en utilisant pct_volume_categorise - poids2_total_categorise = round(poids2_total * pct_volume2_categorise) - - # Dépivotage du tableau pour avoir une base de données exploitable - df_volume2 = df_volume2.melt( - id_vars=cols_identifiers, - value_vars=cols_volume, - var_name="Matériau", - value_name="Volume", - ) + # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau + fig3 = px.histogram( + df_typemilieu, + x="TYPE_MILIEU", + y="Volume", + color="Matériau", + barnorm="percent", + title="Part de chaque matériau en volume selon le milieu de collecte", + color_discrete_map=colors_map, + text_auto=True, + ) + # Format d'affichage + fig3.update_layout( + bargap=0.2, + height=600, + yaxis_title="Part du volume collecté (en %)", + xaxis_title=None, + ) + fig3.update_xaxes(tickangle=-30) + # Etiquettes et formats de nombres + fig3.update_traces( + texttemplate="%{y:.0f}%", + textposition="inside", + hovertemplate="%{x}
Part du volume collecté dans ce milieu: %{y:.0f} %", + textfont_size=14, + ) - # estimation du poids categorisée en utilisant pct_volume_categorise - poids_total_categorise = round(poids_total * pct_volume_categorise) + # Afficher le graphique + with st.container(border=True): + st.plotly_chart(fig3, use_container_width=True) - # Grouper par type de matériau pour les visualisations - df_totals_sorted2 = df_volume2.groupby(["Matériau"], as_index=False)["Volume"].sum() - df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) - df_totals_sorted2["Volume_"] = ( - df_totals_sorted2["Volume"] - .apply(lambda x: "{0:,.0f}".format(x)) - .replace(",", " ") - ) + # Ligne 3 : Graphe par milieu , lieu et année + st.write("**Détail par milieu, lieu ou année**") - # Étape 4: Création du Graphique - - if not df_filtered.empty: - fig4 = px.treemap( - df_totals_sorted2, - path=["Matériau"], - values="Volume", - title="Répartition des matériaux en volume", - color="Matériau", - color_discrete_map=colors_map, - ) - fig4.update_layout( - margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 - ) - fig4.update_traces( - textinfo="label+value", - texttemplate="%{label}
%{value:.0f} litres", - textfont=dict(size=16), - hovertemplate="%{label}
Volume: %{value:.0f}", - ) - - with st.container(border=True): - st.plotly_chart(fig4, use_container_width=True) - - else: - st.write("Aucune donnée à afficher pour les filtres sélectionnés.") - - - # Onglet 2 : Top Déchets - with tab2: - - # Préparation des datas pour l'onglet 2 - df_top = df_nb_dechet.copy() - df_top_data_releves = df_other.copy() - - # Calcul du nombre total de déchets catégorisés sur le territoier - nb_total_dechets = df_top[(df_top["type_regroupement"] == "GROUPE")][ - "nb_dechet" - ].sum() - nb_total_dechets = f"{nb_total_dechets:,.0f}".replace(",", " ") - - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - - # volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") - cell1.metric("Nombre de déchets catégorisés", f"{nb_total_dechets} déchets") - - # 2ème métrique : équivalent volume catégorisé - cell2 = l1_col2.container(border=True) - volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") - cell2.metric( - "Equivalent en volume ", - f"{volume_total_categorise} litres", - ) + # Étape 1: Création des filtres + + df_other_metrics = df_other_metrics_raw.copy() + df_other_metrics = df_other_metrics.fillna(0) + + selected_annee = st.selectbox( + "Choisir une année:", + options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), + ) + if selected_annee != "Aucune sélection": + filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee].copy() + filtered_metrics_milieu = df_other_metrics[ + df_other_metrics["ANNEE"] == selected_annee + ].copy() + else: + filtered_data_milieu = df_other.copy() + filtered_metrics_milieu = df_other_metrics.copy() + + selected_type_milieu = 
st.selectbox( + "Choisir un type de milieu:", + options=["Aucune sélection"] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + ) + + if selected_type_milieu != "Aucune sélection": + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu + ] + filtered_metrics_milieu = filtered_metrics_milieu[ + filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu + ] + else: + filtered_data_lieu = filtered_data_milieu.copy() + filtered_metrics_milieu = df_other_metrics.copy() + + selected_type_lieu = st.selectbox( + "Choisir un type de lieu:", + options=["Aucune sélection"] + list(filtered_data_lieu["TYPE_LIEU"].unique()), + ) + + if ( + selected_annee == "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + ): + df_filtered = df_other.copy() + df_filtered_metrics = df_other_metrics_raw.copy() + elif ( + selected_type_milieu == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + ): + df_filtered = df_other[df_other["ANNEE"] == selected_annee].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["ANNEE"] == selected_annee + ].copy() + elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + and selected_type_milieu != "Aucune sélection" + ): + df_filtered = df_other[df_other["TYPE_MILIEU"] == selected_type_milieu].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["TYPE_MILIEU"] == selected_type_milieu + ].copy() + + elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + ): + df_filtered = df_other[df_other["TYPE_LIEU"] == selected_type_lieu].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["TYPE_LIEU"] == selected_type_lieu + ].copy() + + elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and 
selected_type_milieu != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["TYPE_LIEU"] == selected_type_lieu) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + ] + elif ( + selected_annee != "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_LIEU"] == selected_type_lieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + ] + elif ( + selected_annee != "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + and selected_type_milieu != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + ] + + else: + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + & (df_other["TYPE_LIEU"] == selected_type_lieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + ] + + # Ligne 5 : Metriques filtrés + l5_col1, l5_col2, l5_col3 = st.columns(3) + cell6 = l5_col1.container(border=True) + cell7 = l5_col2.container(border=True) + cell8 = l5_col3.container(border=True) + + poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() + volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() + + volume_total_filtered 
= f"{volume_total_filtered:,.0f}".replace(",", " ") + cell6.metric("Volume de dechets collectés", f"{volume_total_filtered} litres") - # 3ème métrique : nombre de relevés - cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") - - # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(nb_collectes) - + " collecte considérées dans les données présentées." - ) - elif nb_collectes_int <= 5: - st.warning( - "⚠️ Il n'y a que " - + str(nb_collectes) - + " collectes considérées dans les données présentées." - ) - - # Ligne 2 : graphique top déchets - - # Filtration des données pour nb_dechets - df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") - # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement - df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] - # Group by 'categorie', sum 'nb_dechet', et top 10 - df_top10_dechets = ( - df_dechets_groupe.groupby("categorie") - .agg({"nb_dechet": "sum"}) - .sort_values(by="nb_dechet", ascending=False) - .head(10) + poids_total_filtered = f"{poids_total_filtered:,.0f}".replace(",", " ") + cell7.metric("Poids total collecté", f"{poids_total_filtered} kg") + + nombre_collectes_filtered = f"{len(df_filtered):,.0f}".replace(",", " ") + cell8.metric("Nombre de collectes", f"{nombre_collectes_filtered}") + + # Message d'avertissement nb de collectes en dessous de 5 + if len(df_filtered) == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(len(df_filtered)) + + " collecte considérées dans les données présentées." 
) - # recuperation de ces 10 dechets dans une liste pour filtration bubble map - noms_top10_dechets = df_top10_dechets.index.tolist() - # Preparation des datas pour l'onglet 3# ajout de la colonne materiau - df_top10_dechets = df_top10_dechets.merge( - df_dict_corr_dechet_materiau, on="categorie", how="left" + elif len(df_filtered) <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(len(df_filtered)) + + " collectes considérées dans les données présentées." ) - # Preparation de la figure barplot - df_top10_dechets.reset_index(inplace=True) - # Création du graphique en barres avec Plotly Express - fig5 = px.bar( - df_top10_dechets, - y="categorie", - x="nb_dechet", - labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, - title="Top 10 dechets ramassés ", - text="nb_dechet", - color="Materiau", + # Étape 3: Preparation dataframe pour graphe + # Copie des données pour transfo + df_volume2 = df_filtered.copy() + + # Calcul des indicateurs clés de haut de tableau avant transformation + volume2_total = df_volume2["VOLUME_TOTAL"].sum() + poids2_total = df_volume2["POIDS_TOTAL"].sum() + volume2_total_categorise = df_volume2[cols_volume].sum().sum() + pct_volume2_categorise = volume2_total_categorise / volume2_total + nb_collectes2 = len(df_volume2) + + # estimation du poids categorisée en utilisant pct_volume_categorise + poids2_total_categorise = round(poids2_total * pct_volume2_categorise) + + # Dépivotage du tableau pour avoir une base de données exploitable + df_volume2 = df_volume2.melt( + id_vars=cols_identifiers, + value_vars=cols_volume, + var_name="Matériau", + value_name="Volume", + ) + + # Nettoyer le nom du Type déchet pour le rendre plus lisible + df_volume2["Matériau"] = ( + df_volume2["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() + ) + + # Grouper par type de matériau pour les visualisations + df_totals_sorted2 = df_volume2.groupby(["Matériau"], as_index=False)["Volume"].sum() + df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], 
ascending=False) + df_totals_sorted2["Volume_"] = ( + df_totals_sorted2["Volume"] + .apply(lambda x: "{0:,.0f}".format(x)) + .replace(",", " ") + ) + + # Étape 4: Création du Graphique + + if not df_filtered.empty: + fig4 = px.treemap( + df_totals_sorted2, + path=["Matériau"], + values="Volume", + title="Répartition des matériaux en volume", + color="Matériau", color_discrete_map=colors_map, - category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, ) - fig5.update_layout(xaxis_type="log") - # Amélioration du visuel du graphique - fig5.update_traces( - # texttemplate="%{text:.2f}", - textposition="inside", - textfont_color="white", - textfont_size=20, + fig4.update_layout( + margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 ) - fig5.update_layout( - width=1400, - height=900, - uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=90, - legend=dict(x=1, y=0, xanchor="right", yanchor="bottom"), + fig4.update_traces( + textinfo="label+value", + texttemplate="%{label}
%{value:.0f} litres", + textfont=dict(size=16), + hovertemplate="%{label}
Volume: %{value:.0f}", ) - # Suppression de la colonne categorie - del df_top10_dechets["Materiau"] - with st.container(border=True): - st.plotly_chart(fig5, use_container_width=True) - - st.write("") - st.caption( - f"Note : Analyse basée sur les collectes qui ont fait l'objet d'un comptage détaillé par déchet,\ - soit {volume_total_categorise} litres équivalent à {pct_volume_categorise:.0%} du volume collecté\ - sur le territoire." - ) - with st.container(): - # Ajout de la selectbox - selected_dechet = st.selectbox( - "Choisir un type de déchet :", noms_top10_dechets, index=0 - ) - - st.divider() - - # Ligne 3 : Graphe par milieu de collecte - st.write("**Volume collecté par matériau en fonction du milieu de collecte**") - - # Part de volume collecté par type de milieu - - map_data = folium.Map( - location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], - zoom_start=8, - tiles="OpenStreetMap", - ) - - # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau - fig3 = px.histogram( - df_typemilieu, - x="TYPE_MILIEU", - y="Volume", - color="Matériau", - barnorm="percent", - title="Répartition des matériaux en fonction du milieu de collecte", - text_auto=False, - color_discrete_map=colors_map, - ) - - # Application d'une limite minimale pour le rayon si nécessaire - radius = max(radius, 5) - - folium.CircleMarker( - location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), - radius=radius, # Utilisation du rayon ajusté - popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", - color="#3186cc", - fill=True, - fill_color="#3186cc", - ).add_to(map_data) - - # Affichage de la carte Folium dans Streamlit - st_folium = st.components.v1.html - st_folium( - folium.Figure().add_child(map_data).render(), # , width=1400 - height=750, - ) - - # Ligne 3 : Graphe par milieu , lieu et année - st.write("**Détail par milieu, lieu ou année**") - - # Étape 1: Création des filtres - selected_annee = 
st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), - ) - if selected_annee != "Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee] - else: - filtered_data_milieu = df_other - - selected_type_milieu = st.selectbox( - "Choisir un type de milieu:", - options=["Aucune sélection"] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), - ) - - # Préparation des données - df_dechet_copy = df_nb_dechet.copy() - df_filtre_copy = df_other.copy() - - # Étape 1: Création des filtres - selected_annee_onglet_3 = st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].sort_values().unique()), - key="année_select", + st.plotly_chart(fig4, use_container_width=True) + + else: + st.write("Aucune donnée à afficher pour les filtres sélectionnés.") + + +# Onglet 2 : Top Déchets +with tab2: + + # Préparation des datas pour l'onglet 2 + df_top = df_nb_dechet.copy() + df_top_data_releves = df_other.copy() + + # Calcul du nombre total de déchets catégorisés sur le territoier + nb_total_dechets = df_top[(df_top["type_regroupement"] == "GROUPE")][ + "nb_dechet" + ].sum() + nb_total_dechets = f"{nb_total_dechets:,.0f}".replace(",", " ") + + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + + # volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") + cell1.metric("Nombre de déchets catégorisés", f"{nb_total_dechets} déchets") + + # 2ème métrique : équivalent volume catégorisé + cell2 = l1_col2.container(border=True) + volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") + cell2.metric( + "Equivalent en 
volume ", + f"{volume_total_categorise} litres", + ) + + # 3ème métrique : nombre de relevés + cell3 = l1_col3.container(border=True) + cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") + + # Message d'avertissement nb de collectes en dessous de 5 + if nb_collectes_int == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(nb_collectes) + + " collecte considérées dans les données présentées." ) - if selected_annee_onglet_3 != "Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee_onglet_3] - else: - filtered_data_milieu = df_other.copy() - - # Grouper par type de matériau pour les visualisations - df_totals_sorted2 = df_volume2.groupby(["Matériau"], as_index=False)[ - "Volume" - ].sum() - df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) - - # Étape 4: Création du Graphique - if not df_filtered.empty: - fig4 = px.pie( - df_totals_sorted2, - values="Volume", - names="Matériau", - title="Répartition des matériaux en volume", - hole=0.4, - color="Matériau", - color_discrete_map=colors_map, - ) - - # Amélioration de l'affichage - fig4.update_traces(textinfo="percent") - fig4.update_layout(autosize=True, legend_title_text="Matériau") - with st.container(border=True): - st.plotly_chart(fig4, use_container_width=True) - else: - st.write("Aucune donnée à afficher pour les filtres sélectionnés.") - - # 2ème option de graphique, à choisir - if not df_filtered.empty: - fig5 = px.treemap( - df_totals_sorted2, - path=["Matériau"], - values="Volume", - title="2ème option : treemap de répartition des matériaux en volume", - color="Matériau", - color_discrete_map=colors_map, - ) - fig5.update_layout( - margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 - ) - fig5.update_traces(textinfo="label+value") - with st.container(border=True): - st.plotly_chart(fig5, use_container_width=True) - else: - st.write("Aucune donnée à afficher pour les filtres sélectionnés.") - - # Onglet 2 : Top Déchets - 
with tab2: - - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - - volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") - cell1.metric( - "Volume de déchets catégorisés", f"{volume_total_categorise} litres" - ) - - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - poids_total_categorise = f"{poids_total_categorise:,.0f}".replace(",", " ") - # poids_total = f"{poids_total:,.0f}".replace(",", " ") - cell2.metric( - "Poids estimé de déchets categorisés", - f"{poids_total_categorise} kg", - ) - - # 3ème métrique : nombre de relevés - cell3 = l1_col3.container(border=True) - # nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") - cell3.metric("Nombre de collectes réalisées", f"{nb_collectes}") - - # Ligne 2 : graphique top déchets - - # Préparation des datas pour l'onglet 2 - df_top = df_nb_dechet.copy() - - df_top_data_releves = df_other.copy() - # Filtration des données pour nb_dechets - df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") - # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement - df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] - # Group by 'categorie', sum 'nb_dechet', et top 10 - df_top10_dechets = ( - df_dechets_groupe.groupby("categorie") - .agg({"nb_dechet": "sum"}) - .sort_values(by="nb_dechet", ascending=False) - .head(10) - ) - # recuperation de ces 10 dechets dans une liste pour filtration bubble map - noms_top10_dechets = df_top10_dechets.index.tolist() - # Preparation des datas pour l'onglet 3# ajout de la colonne materiau - df_top10_dechets = df_top10_dechets.merge( - df_dict_corr_dechet_materiau, 
on="categorie", how="left" - ) - # Preparation de la figure barplot - df_top10_dechets.reset_index(inplace=True) - # Création du graphique en barres avec Plotly Express - fig = px.bar( - df_top10_dechets, - x="categorie", - y="nb_dechet", - labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, - title="Top 10 dechets ramassés", - color="Materiau", - color_discrete_map=colors_map, - category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, - ) - fig.update_layout(yaxis_type="log") - # Amélioration du visuel du graphique - fig.update_traces( - # texttemplate="%{text:.2f}", - textposition="outside" - ) - fig.update_layout( - width=1400, - height=900, - uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=90, - ) - # Suppression de la colonne categorie - del df_top10_dechets["Materiau"] - - if ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other.copy() - elif ( - selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() - elif ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" - ): - df_filtered = df_other[ - df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - ].copy() - elif ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other[ - df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 - ].copy() - elif ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" - ): - df_filtered = df_other[ - 
(df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - elif ( - selected_annee_onglet_3 != "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - ].copy() - elif ( - selected_annee_onglet_3 != "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - - elif selected_type_lieu_onglet_3 == "Aucune sélection": - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - ].copy() - - if selected_type_milieu_onglet_3 != "Aucune sélection": - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - ] - else: - filtered_data_lieu = filtered_data_milieu - - selected_type_lieu_onglet_3 = st.selectbox( - "Choisir un type de lieu:", - options=["Aucune sélection"] - + list(filtered_data_lieu["TYPE_LIEU"].unique()), - key="type_lieu_select", - ) - - if ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other - elif ( - selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other[df_other["ANNEE"] == 
selected_annee_onglet_3] - elif selected_type_lieu_onglet_3 == "Aucune sélection": - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ] - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - ] - - # Filtration des données pour nb_dechets - df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") - - # Data pour le plot secteur - secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] - top_secteur_df = ( - secteur_df.groupby("categorie")["nb_dechet"] - .sum() - .sort_values(ascending=True) - ) - top_secteur_df = top_secteur_df.reset_index() - top_secteur_df.columns = ["Secteur", "Nombre de déchets"] - - # Data pour le plot marque - marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] - top_marque_df = ( - marque_df.groupby("categorie")["nb_dechet"] - .sum() - .sort_values(ascending=True) - ) - top_marque_df = top_marque_df.reset_index() - top_marque_df.columns = ["Marque", "Nombre de déchets"] - - # Chiffres clés - nb_dechet_secteur = secteur_df["nb_dechet"].sum() - nb_secteurs = len(top_secteur_df["Secteur"].unique()) - - nb_dechet_marque = marque_df["nb_dechet"].sum() - nb_marques = len(top_marque_df["Marque"].unique()) - - nb_dechet_marque = marque_df["nb_dechet"].sum() - nb_marques = len(top_marque_df["Marque"].unique()) - collectes = len(df_filtered) - - l1_col1, l1_col2 = st.columns(2) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - - l1_col1, l1_col2, l1_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne 
(pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - - # Trick pour séparer les milliers - nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") - cell1.metric( - "Nombre de déchets catégorisés par secteur", f"{nb_dechet_secteur} dechets" + elif nb_collectes_int <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(nb_collectes) + + " collectes considérées dans les données présentées." ) - fig_secteur = px.bar( - top_secteur_df.tail(10), - x="Nombre de déchets", - y="Secteur", - title="Top 10 des secteurs les plus ramassés", - orientation="h", - ) - # add log scale to x axis - fig_secteur.update_layout(xaxis_type="log") - fig_secteur.update_traces( - # texttemplate="%{text:.2f}", - textposition="outside" - ) - fig_secteur.update_layout( - width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide" - ) - with st.container(border=True): - st.plotly_chart(fig_secteur, use_container_width=True) - - # 3ème métrique : nombre de collectes - cell3 = l1_col3.container(border=True) - collectes_formatted = f"{collectes:,.0f}".replace(",", " ") - cell3.metric( - "Nombre de collectes comptabilisées", - f"{collectes_formatted} collectes", - ) + # Ligne 2 : graphique top déchets + + # Filtration des données pour nb_dechets + df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") + # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement + df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] + # Group by 'categorie', sum 'nb_dechet', et top 10 + df_top10_dechets = ( + df_dechets_groupe.groupby("categorie") + .agg({"nb_dechet": "sum"}) + .sort_values(by="nb_dechet", ascending=False) + .head(10) + ) + # recuperation de ces 10 dechets dans une liste pour filtration bubble map + noms_top10_dechets = df_top10_dechets.index.tolist() + # Preparation des datas pour l'onglet 3# ajout de la colonne materiau + df_top10_dechets = 
df_top10_dechets.merge( + df_dict_corr_dechet_materiau, on="categorie", how="left" + ) + # Preparation de la figure barplot + df_top10_dechets.reset_index(inplace=True) + # Création du graphique en barres avec Plotly Express + + fig5 = px.bar( + df_top10_dechets, + y="categorie", + x="nb_dechet", + labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, + title="Top 10 dechets ramassés ", + text="nb_dechet", + color="Materiau", + color_discrete_map=colors_map, + category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, + ) + fig5.update_layout(xaxis_type="log") + # Amélioration du visuel du graphique + fig5.update_traces( + # texttemplate="%{text:.2f}", + textposition="inside", + textfont_color="white", + textfont_size=20, + ) + fig5.update_layout( + width=1400, + height=900, + uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=90, + legend=dict(x=1, y=0, xanchor="right", yanchor="bottom"), + ) - # Message d'avertissement nb de collectes en dessous de 5 - if collectes == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(collectes) - + " collecte considérées dans les données présentées." - ) - elif collectes <= 5: - st.warning( - "⚠️ Il n'y a que " - + str(collectes) - + " collectes considérées dans les données présentées." 
- ) - - # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page - colors_map_secteur = { - "AGRICULTURE": "#156644", - "ALIMENTATION": "#F7D156", - "AMEUBLEMENT, DÉCORATION ET ÉQUIPEMENT DE LA MAISON": "#F79D65", - "AQUACULTURE": "#0067C2", - "BÂTIMENT, TRAVAUX ET MATÉRIAUX DE CONSTRUCTION": "#FF9900", - "CHASSE ET ARMEMENT": "#23A76F", - "COSMÉTIQUES, HYGIÈNE ET SOINS PERSONNELS": "#BF726B", - "DÉTERGENTS ET PRODUITS D'ENTRETIENS": "#506266", - "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", - "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", - "INDÉTERMINÉ": "#967EA1", - "INFORMATIQUE ET HIGHTECH": "#E351F7", - "JOUETS ET LOISIR": "#A64D79", - "MATÉRIEL ÉLECTRIQUE ET ÉLECTROMÉNAGER": "#AE05C3", - "MÉTALLURGIE": "#EC4773", - "PÊCHE": "#003463", - "PETROCHIMIE": "#0D0D0D", - "PHARMACEUTIQUE/PARAMÉDICAL": "#61BF5E", - "PLASTURGIE": "#05A2AD", - "TABAC": "#E9003F", - "TEXTILE ET HABILLEMENT": "#FA9EE5", - "TRAITEMENT DES EAUX": "#4AA6F7", - "TRANSPORT / AUTOMOBILE": "#6C2775", - "VAISSELLE À USAGE UNIQUE": "#732D3A", - "AUTRES SECTEURS": "#D9C190", - } - - fig_secteur = px.bar( - top_secteur_df.tail(10).sort_values(by="Nombre de déchets", ascending=False), - x="Nombre de déchets", - y="Secteur", - color="Secteur", - title="Top 10 des secteurs les plus ramassés", - orientation="h", - color_discrete_map=colors_map_secteur, - text_auto=True, + # Suppression de la colonne categorie + del df_top10_dechets["Materiau"] + + with st.container(border=True): + st.plotly_chart(fig5, use_container_width=True) + + st.write("") + st.caption( + f"Note : Analyse basée sur les collectes qui ont fait l'objet d'un comptage détaillé par déchet,\ + soit {volume_total_categorise} litres équivalent à {pct_volume_categorise:.0%} du volume collecté\ + sur le territoire." 
) - # add log scale to x axis - fig_secteur.update_layout(xaxis_type="log") - fig_secteur.update_traces(texttemplate="%{value:.0f}", textposition="inside") - fig_secteur.update_layout( - width=800, - height=500, - uniformtext_mode="hide", - showlegend=False, - yaxis_title=None, + with st.container(): + # Ajout de la selectbox + selected_dechet = st.selectbox( + "Choisir un type de déchet :", noms_top10_dechets, index=0 ) - with st.container(border=True): - st.plotly_chart(fig_secteur, use_container_width=True) - l1_col1, l1_col2 = st.columns(2) - cell1 = l1_col1.container(border=True) + # Filtration sur le dechet top 10 sélectionné + df_top_map = df_top[df_top["categorie"] == selected_dechet] - # Trick pour séparer les milliers - nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") - cell1.metric( - "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" + # Création du DataFrame de travail pour la carte + df_map_data = pd.merge( + df_top_map, df_top_data_releves, on="ID_RELEVE", how="inner" ) - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - nb_marques = f"{nb_marques:,.0f}".replace(",", " ") - cell2.metric( - "Nombre de marques identifiés lors des collectes", - f"{nb_marques} marques", + # Création de la carte centrée autour d'une localisation + # Calcul des limites à partir de vos données + min_lat = df_map_data["LIEU_COORD_GPS_Y"].min() + max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() + min_lon = df_map_data["LIEU_COORD_GPS_X"].min() + max_lon = df_map_data["LIEU_COORD_GPS_X"].max() + + map_data = folium.Map( + location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], + zoom_start=8, + tiles="OpenStreetMap", ) - fig_marque = px.bar( - top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), - x="Nombre de déchets", - y="Marque", - title="Top 10 des marques les plus ramassées", - color_discrete_sequence=["#1951A0"], - orientation="h", - text_auto=False, - text=top_marque_df.tail(10)["Marque"] 
- + ": " - + top_marque_df.tail(10)["Nombre de déchets"].astype(str), + # Facteur de normalisation pour ajuster la taille des bulles + normalisation_facteur = 1000 + + for index, row in df_map_data.iterrows(): + # Application de la normalisation + radius = row["nb_dechet"] / normalisation_facteur + + # Application d'une limite minimale pour le rayon si nécessaire + radius = max(radius, 5) + + folium.CircleMarker( + location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), + radius=radius, # Utilisation du rayon ajusté + popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", + color="#3186cc", + fill=True, + fill_color="#3186cc", + ).add_to(map_data) + + # Affichage de la carte Folium dans Streamlit + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_data).render(), # , width=1400 + height=750, ) - # add log scale to x axis - fig_marque.update_layout(xaxis_type="log") - # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") - - fig_marque.update_layout( - width=800, - height=500, - uniformtext_minsize=8, - uniformtext_mode="hide", - yaxis_title=None, + + +# Onglet 3 : Secteurs et marques +with tab3: + st.write("") + + # Préparation des données + df_dechet_copy = df_nb_dechet.copy() + df_filtre_copy = df_other.copy() + + # Étape 1: Création des filtres + selected_annee_onglet_3 = st.selectbox( + "Choisir une année:", + options=["Aucune sélection"] + list(df_other["ANNEE"].sort_values().unique()), + key="année_select", + ) + if selected_annee_onglet_3 != "Aucune sélection": + filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee_onglet_3] + else: + filtered_data_milieu = df_other.copy() + + selected_type_milieu_onglet_3 = st.selectbox( + "Choisir un type de milieu:", + options=["Aucune sélection"] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + key="type_milieu_select", + ) + + if selected_type_milieu_onglet_3 != "Aucune sélection": + 
filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + ] + else: + filtered_data_lieu = filtered_data_milieu + + selected_type_lieu_onglet_3 = st.selectbox( + "Choisir un type de lieu:", + options=["Aucune sélection"] + list(filtered_data_lieu["TYPE_LIEU"].unique()), + key="type_lieu_select", + ) + + if ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other.copy() + elif ( + selected_type_milieu_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + ].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[ + df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 + ].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ].copy() + elif ( + selected_annee_onglet_3 != "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + ].copy() + elif ( + selected_annee_onglet_3 
!= "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ].copy() + + elif selected_type_lieu_onglet_3 == "Aucune sélection": + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ].copy() + else: + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + ].copy() + + # Filtration des données pour nb_dechets + df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") + + # Data pour le plot secteur + secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] + top_secteur_df = ( + secteur_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) + ) + top_secteur_df = top_secteur_df.reset_index() + top_secteur_df.columns = ["Secteur", "Nombre de déchets"] + + # Data pour le plot marque + marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] + top_marque_df = ( + marque_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) + ) + top_marque_df = top_marque_df.reset_index() + top_marque_df.columns = ["Marque", "Nombre de déchets"] + + # Chiffres clés + nb_dechet_secteur = secteur_df["nb_dechet"].sum() + nb_secteurs = len(top_secteur_df["Secteur"].unique()) + + nb_dechet_marque = marque_df["nb_dechet"].sum() + nb_marques = len(top_marque_df["Marque"].unique()) + collectes = len(df_filtered) + + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + + l1_col1, l1_col2, l1_col3 = st.columns(3) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume 
total de déchets collectés + cell1 = l1_col1.container(border=True) + + # Trick pour séparer les milliers + nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") + cell1.metric( + "Nombre de déchets catégorisés par secteur", f"{nb_dechet_secteur} dechets" + ) + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + nb_secteurs = f"{nb_secteurs:,.0f}".replace(",", " ") + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric( + "Nombre de secteurs identifiés lors des collectes", + f"{nb_secteurs} secteurs", + ) + + # 3ème métrique : nombre de collectes + cell3 = l1_col3.container(border=True) + collectes_formatted = f"{collectes:,.0f}".replace(",", " ") + cell3.metric( + "Nombre de collectes comptabilisées", + f"{collectes_formatted} collectes", + ) + + # Message d'avertissement nb de collectes en dessous de 5 + if collectes == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(collectes) + + " collecte considérées dans les données présentées." + ) + elif collectes <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(collectes) + + " collectes considérées dans les données présentées." 
) -else: - st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") + # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page + colors_map_secteur = { + "AGRICULTURE": "#156644", + "ALIMENTATION": "#F7D156", + "AMEUBLEMENT, DÉCORATION ET ÉQUIPEMENT DE LA MAISON": "#F79D65", + "AQUACULTURE": "#0067C2", + "BÂTIMENT, TRAVAUX ET MATÉRIAUX DE CONSTRUCTION": "#FF9900", + "CHASSE ET ARMEMENT": "#23A76F", + "COSMÉTIQUES, HYGIÈNE ET SOINS PERSONNELS": "#BF726B", + "DÉTERGENTS ET PRODUITS D'ENTRETIENS": "#506266", + "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", + "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", + "INDÉTERMINÉ": "#967EA1", + "INFORMATIQUE ET HIGHTECH": "#E351F7", + "JOUETS ET LOISIR": "#A64D79", + "MATÉRIEL ÉLECTRIQUE ET ÉLECTROMÉNAGER": "#AE05C3", + "MÉTALLURGIE": "#EC4773", + "PÊCHE": "#003463", + "PETROCHIMIE": "#0D0D0D", + "PHARMACEUTIQUE/PARAMÉDICAL": "#61BF5E", + "PLASTURGIE": "#05A2AD", + "TABAC": "#E9003F", + "TEXTILE ET HABILLEMENT": "#FA9EE5", + "TRAITEMENT DES EAUX": "#4AA6F7", + "TRANSPORT / AUTOMOBILE": "#6C2775", + "VAISSELLE À USAGE UNIQUE": "#732D3A", + "AUTRES SECTEURS": "#D9C190", + } + + fig_secteur = px.bar( + top_secteur_df.tail(10).sort_values(by="Nombre de déchets", ascending=False), + x="Nombre de déchets", + y="Secteur", + color="Secteur", + title="Top 10 des secteurs les plus ramassés", + orientation="h", + color_discrete_map=colors_map_secteur, + text_auto=True, + ) + # add log scale to x axis + fig_secteur.update_layout(xaxis_type="log") + fig_secteur.update_traces(texttemplate="%{value:.0f}", textposition="inside") + fig_secteur.update_layout( + width=800, + height=500, + uniformtext_mode="hide", + showlegend=False, + yaxis_title=None, + ) + with st.container(border=True): + st.plotly_chart(fig_secteur, use_container_width=True) + + l1_col1, l1_col2 = st.columns(2) + cell1 = l1_col1.container(border=True) + + # Trick pour séparer les milliers + nb_dechet_marque = 
f"{nb_dechet_marque:,.0f}".replace(",", " ") + cell1.metric( + "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" + ) + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + nb_marques = f"{nb_marques:,.0f}".replace(",", " ") + cell2.metric( + "Nombre de marques identifiés lors des collectes", + f"{nb_marques} marques", + ) + + fig_marque = px.bar( + top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), + x="Nombre de déchets", + y="Marque", + title="Top 10 des marques les plus ramassées", + color_discrete_sequence=["#1951A0"], + orientation="h", + text_auto=False, + text=top_marque_df.tail(10)["Marque"] + + ": " + + top_marque_df.tail(10)["Nombre de déchets"].astype(str), + ) + # add log scale to x axis + fig_marque.update_layout(xaxis_type="log") + # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") + + fig_marque.update_layout( + width=800, + height=500, + uniformtext_minsize=8, + uniformtext_mode="hide", + yaxis_title=None, + ) + + with st.container(border=True): + st.plotly_chart(fig_marque, use_container_width=True) diff --git a/dashboards/app/pages/register.py b/dashboards/app/pages/register.py deleted file mode 100644 index be54cb4..0000000 --- a/dashboards/app/pages/register.py +++ /dev/null @@ -1,47 +0,0 @@ -from pathlib import Path -import yaml -from yaml.loader import SafeLoader -import streamlit as st -import streamlit_authenticator as stauth - -st.markdown( - """ -# Bienvenue 👋 -#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
-""", -) - -p_cred = Path(".credentials.yml") -with p_cred.open() as file: - config = yaml.load(file, Loader=SafeLoader) - -authenticator = stauth.Authenticate( - config["credentials"], - config["cookie"]["name"], - config["cookie"]["key"], - config["cookie"]["expiry_days"], - config["pre-authorized"], -) - -try: - ( - email_of_registered_user, - username_of_registered_user, - name_of_registered_user, - ) = authenticator.register_user( - pre_authorization=False, - fields={ - "Form name": "S'enregistrer", - "Email": "Email", - "Username": "Identifiant", - "Password": "Mot de passe", - "Repeat password": "Répeter le mot de passe", - "Register": "S'enregistrer", - }, - ) - if email_of_registered_user: - with open(".credentials.yml", "w") as file: - yaml.dump(config, file, default_flow_style=False) - st.success("Utilisateur enregistré") -except Exception as e: - st.error(e) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 67eeedb..e75f8b9 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -5,6 +5,3 @@ duckdb==0.10.0 streamlit==1.32.2 streamlit-folium==0.19.1 plotly==5.19.0 -streamlit-dynamic-filters==0.1.6 -streamlit-authenticator==0.3.2 -st-pages==0.4.5 \ No newline at end of file From f24af16fc5a9347c063e205057fa419297800fc3 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 19 Apr 2024 16:32:52 -0400 Subject: [PATCH 050/147] =?UTF-8?q?[kb]=20=F0=9F=9A=91=20Add=20authenticat?= =?UTF-8?q?ion?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 256 +++-- dashboards/app/pages/data.py | 1737 ++++++++++++++++--------------- dashboards/app/requirements.txt | 3 + 3 files changed, 1029 insertions(+), 967 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index b9fadb2..00c5d07 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -2,6 +2,10 @@ import pandas as pd import streamlit as st +import 
streamlit_authenticator as stauth +import yaml +from st_pages import Page, show_pages +from yaml.loader import SafeLoader # Configuration de la page st.set_page_config( @@ -19,122 +23,162 @@ def load_css(file_name: str) -> None: st.markdown(f"", unsafe_allow_html=True) -# Load and apply the CSS file at the start of your app -# local debug -load_css("style.css") +# Login +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) - -st.markdown( - """ -# Bienvenue 👋 -#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! -""", +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], ) - -st.markdown("""# À propos""") - - -# Chargement des données et filtre géographique à l'arrivée sur le dashboard -# Table des volumes par matériaux -@st.cache_data -def load_df_other() -> pd.DataFrame: - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv", - ) - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE - # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - return df - - -# Table du nb de déchets -@st.cache_data -def load_df_nb_dechet() -> pd.DataFrame: - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv", - ) - - -# Appel des fonctions pour charger les données - -df_other = load_df_other() -df_nb_dechets = load_df_nb_dechet() - - -# Création du filtre par niveau géographique : correspondance labels et variables du dataframe 
-niveaux_admin_dict = { - "Région": "REGION", - "Département": "DEP_CODE_NOM", - "EPCI": "LIBEPCI", - "Commune": "COMMUNE_CODE_NOM", -} - -# 1ère étape : sélection du niveau administratif concerné (région, dép...) -# Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment -# Récupérer les index pour conserver la valeur des filtres au changement de pages -# Filtre niveau administratif -niveau_admin = st.session_state.get("niveau_admin", None) -index_admin = st.session_state.get("index_admin", None) -# Filtre collectivité -collectivite = st.session_state.get("collectivite", None) -index_collec = st.session_state.get("index_collec", None) - -# Initialiser la selectbox avec l'index récupéré -select_niveauadmin = st.selectbox( - "Niveau administratif : ", - niveaux_admin_dict.keys(), - index=index_admin, +authenticator.login( + fields={ + "Form name": "Connexion", + "Username": "Identifiant", + "Password": "Mot de passe", + "Login": "Connexion", + }, ) -if select_niveauadmin is not None: - # Filtrer la liste des collectivités en fonction du niveau admin - liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] - liste_collectivites = liste_collectivites.sort_values().unique() - # 2ème filtre : sélection de la collectivité concernée - select_collectivite = st.selectbox( - "Collectivité : ", - liste_collectivites, - index=index_collec, +if st.session_state["authentication_status"]: + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", "Hotspots", "🔥"), + Page("pages/structures.py", "Structures", "🔭"), + ], ) + # Load and apply the CSS file at the start of your app + # local debug + load_css("style.css") -if st.button("Enregistrer la sélection"): - # Enregistrer les valeurs sélectionnées dans le session.state - st.session_state["niveau_admin"] = select_niveauadmin - st.session_state["index_admin"] = 
list(niveaux_admin_dict.keys()).index( - select_niveauadmin, + st.markdown( + """ + # Bienvenue 👋 + #### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! + """, ) - st.session_state["collectivite"] = select_collectivite - st.session_state["index_collec"] = list(liste_collectivites).index( - select_collectivite, + st.markdown("""# À propos""") + + # Chargement des données et filtre géographique à l'arrivée sur le dashboard + # Table des volumes par matériaux + @st.cache_data + def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + # Table du nb de déchets + @st.cache_data + def load_df_nb_dechet() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv", + ) + + # Appel des fonctions pour charger les données + + df_other = load_df_other() + df_nb_dechets = load_df_nb_dechet() + + # Création du filtre par niveau géographique : correspondance labels et variables du df + niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", + } + + # 1ère étape : sélection du niveau administratif concerné (région, dép...) 
+ # Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment + # Récupérer les index pour conserver la valeur des filtres au changement de pages + # Filtre niveau administratif + niveau_admin = st.session_state.get("niveau_admin", None) + index_admin = st.session_state.get("index_admin", None) + # Filtre collectivité + collectivite = st.session_state.get("collectivite", None) + index_collec = st.session_state.get("index_collec", None) + + # Initialiser la selectbox avec l'index récupéré + select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=index_admin, ) - # Afficher la collectivité sélectionnée - st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") - - # Filtrer et enregistrer le DataFrame dans un session state pour la suite - colonne_filtre = niveaux_admin_dict[select_niveauadmin] - df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] - st.session_state["df_other_filtre"] = df_other_filtre - - # Filtrer et enregistrer le dataframe nb_dechets dans session.State - # Récuperer la liste des relevés - id_releves = df_other_filtre["ID_RELEVE"].unique() - # Filtrer df_nb_dechets sur la liste des relevés - st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ - df_nb_dechets["ID_RELEVE"].isin(id_releves) - ] - - # Afficher le nombre de relevés disponibles - nb_releves = len(st.session_state["df_other_filtre"]) - st.write( - f"{nb_releves} relevés de collecte sont disponibles \ - pour l'analyse sur votre territoire.", + if select_niveauadmin is not None: + # Filtrer la liste des collectivités en fonction du niveau admin + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + # 2ème filtre : sélection de la collectivité concernée + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=index_collec, + ) + + if 
st.button("Enregistrer la sélection"): + # Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, + ) + + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + + # Filtrer et enregistrer le DataFrame dans un session state pour la suite + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] + st.session_state["df_other_filtre"] = df_other_filtre + + # Filtrer et enregistrer le dataframe nb_dechets dans session.State + # Récuperer la liste des relevés + id_releves = df_other_filtre["ID_RELEVE"].unique() + # Filtrer df_nb_dechets sur la liste des relevés + st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ + df_nb_dechets["ID_RELEVE"].isin(id_releves) + ] + + # Afficher le nombre de relevés disponibles + nb_releves = len(st.session_state["df_other_filtre"]) + st.write( + f"{nb_releves} relevés de collecte sont disponibles \ + pour l'analyse sur votre territoire.", + ) + + authenticator.logout() +elif st.session_state["authentication_status"] is False: + st.error("Mauvais identifiants ou mot de passe.") +elif st.session_state["authentication_status"] is None: + st.warning("Veuillez entrer votre identifiant et mot de passe") + + show_pages( + [ + Page("home.py", "Home", "🏠 "), + Page("pages/register.py", "S'enregistrer", "🚀"), + ], ) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 8102b21..6117c47 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -23,929 +23,944 @@ """ ) -if filtre_niveau == "" and filtre_collectivite == "": - st.write("Aucune 
sélection de territoire n'a été effectuée") -else: - st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") +if st.session_state["authentication_status"]: + if filtre_niveau == "" and filtre_collectivite == "": + st.write("Aucune sélection de territoire n'a été effectuée") + else: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") + + # Définition d'une fonction pour charger les données du nombre de déchets@st.cache_data + def load_df_dict_corr_dechet_materiau(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" + "chet_groupe_materiau.csv" + ) + # Appel des fonctions pour charger les données + df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() -# Définition d'une fonction pour charger les données du nombre de déchets@st.cache_data -def load_df_dict_corr_dechet_materiau(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" - "chet_groupe_materiau.csv" + # Appeler les dataframes volumes et nb_dechets filtré depuis le session state + if ("df_other_filtre" not in st.session_state) or ( + "df_nb_dechets_filtre" not in st.session_state + ): + st.write( + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home pour afficher les données. 
:warning: + """ + ) + st.stop() + else: + df_other = st.session_state["df_other_filtre"].copy() + df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() + + # Copier le df pour la partie filtrée par milieu/lieu/année + df_other_metrics_raw = df_other.copy() + + # 3 Onglets : Matériaux, Top déchets, Filières et marques + tab1, tab2, tab3 = st.tabs( + [ + "Matériaux :wood:", + "Top Déchets :wastebasket:", + "Secteurs et marques :womans_clothes:", + ] ) + milieu_lieu_dict = ( + df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] + .unique() + .apply(lambda x: x.tolist()) + .to_dict() + ) -# Appel des fonctions pour charger les données -df_dict_corr_dechet_materiau = load_df_dict_corr_dechet_materiau() + annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) -# Appeler les dataframes volumes et nb_dechets filtré depuis le session state -if ("df_other_filtre" not in st.session_state) or ( - "df_nb_dechets_filtre" not in st.session_state -): - st.write( - """ - ### :warning: Merci de sélectionner une collectivité\ - dans l'onglet Home pour afficher les données. 
:warning: - """ - ) - st.stop() -else: - df_other = st.session_state["df_other_filtre"].copy() - df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() + # Onglet 1 : Matériaux + with tab1: -# Copier le df pour la partie filtrée par milieu/lieu/année -df_other_metrics_raw = df_other.copy() + # Transformation du dataframe pour les graphiques + # Variables à conserver en ligne + cols_identifiers = [ + "ANNEE", + "TYPE_MILIEU", + "INSEE_COM", + "DEP", + "REG", + "EPCI", + "BV2022", + ] + # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau + cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] -# 3 Onglets : Matériaux, Top déchets, Filières et marques -tab1, tab2, tab3 = st.tabs( - [ - "Matériaux :wood:", - "Top Déchets :wastebasket:", - "Secteurs et marques :womans_clothes:", - ] -) + # Copie des données pour transfo + df_volume = df_other.copy() -milieu_lieu_dict = ( - df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] - .unique() - .apply(lambda x: x.tolist()) - .to_dict() -) + # Calcul des indicateurs clés de haut de tableau avant transformation + volume_total = df_volume["VOLUME_TOTAL"].sum() + poids_total = df_volume["POIDS_TOTAL"].sum() + volume_total_categorise = df_volume[cols_volume].sum().sum() + pct_volume_categorise = volume_total_categorise / volume_total + nb_collectes_int = len(df_volume) -annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - -# Onglet 1 : Matériaux -with tab1: - - # Transformation du dataframe pour les graphiques - # Variables à conserver en ligne - cols_identifiers = [ - "ANNEE", - "TYPE_MILIEU", - "INSEE_COM", - "DEP", - "REG", - "EPCI", - "BV2022", - ] - - # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau - cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] - - # Copie des données pour transfo - df_volume = df_other.copy() - - # Calcul des indicateurs clés de haut de tableau avant 
transformation - volume_total = df_volume["VOLUME_TOTAL"].sum() - poids_total = df_volume["POIDS_TOTAL"].sum() - volume_total_categorise = df_volume[cols_volume].sum().sum() - pct_volume_categorise = volume_total_categorise / volume_total - nb_collectes_int = len(df_volume) - - # estimation du poids categorisée en utilisant pct_volume_categorise - poids_total_categorise = round(poids_total * pct_volume_categorise) - - # Dépivotage du tableau pour avoir une base de données exploitable - df_volume = df_volume.melt( - id_vars=cols_identifiers, - value_vars=cols_volume, - var_name="Matériau", - value_name="Volume", - ) + # estimation du poids categorisée en utilisant pct_volume_categorise + poids_total_categorise = round(poids_total * pct_volume_categorise) - # Nettoyer le nom du Type déchet pour le rendre plus lisible - df_volume["Matériau"] = ( - df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() - ) + # Dépivotage du tableau pour avoir une base de données exploitable + df_volume = df_volume.melt( + id_vars=cols_identifiers, + value_vars=cols_volume, + var_name="Matériau", + value_name="Volume", + ) - # Grouper par type de matériau pour les visualisations - df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)["Volume"].sum() - df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) - - # Charte graphique MERTERRE : - colors_map = { - "Textile": "#C384B1", - "Papier": "#CAA674", - "Metal": "#A0A0A0", - "Verre": "#3DCE89", - "Autre": "#F3B900", - "Plastique": "#48BEF0", - "Caoutchouc": "#364E74", - "Bois": "#673C11", - "Papier/Carton": "#CAA674", - "Métal": "#A0A0A0", - "Verre/Céramique": "#3DCE89", - "Autre": "#F3B900", - } - - # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - - # 1ère métrique : volume total de déchets 
collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - volume_total = f"{volume_total:,.0f}".replace(",", " ") - cell1.metric("Volume de déchets collectés", f"{volume_total} litres") - - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - poids_total = f"{poids_total:,.0f}".replace(",", " ") - - cell2.metric("Poids total collecté", f"{poids_total} kg") - - # 3ème métrique : nombre de relevés - cell3 = l1_col3.container(border=True) - nb_collectes = f"{nb_collectes_int:,.0f}".replace(",", " ") - cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") - - # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(nb_collectes_int) - + " collecte considérées dans les données présentées." - ) - elif nb_collectes_int <= 5: - st.warning( - "⚠️ Il n'y a que " - + str(nb_collectes_int) - + " collectes considérées dans les données présentées." - ) - - # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux - - l2_col1, l2_col2 = st.columns(2) - cell4 = l2_col1.container(border=True) - cell5 = l2_col2.container(border=True) - with cell4: - - # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance - fig = px.pie( - df_totals_sorted, - values="Volume", - names="Matériau", - title="Répartition des matériaux en volume", - hole=0.4, - color="Matériau", - color_discrete_map=colors_map, + # Nettoyer le nom du Type déchet pour le rendre plus lisible + df_volume["Matériau"] = ( + df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() ) - # Réglage du texte affiché, format et taille de police - fig.update_traces( - textinfo="percent", - texttemplate="%{percent:.0%}", - textfont_size=14, + # Grouper par type de matériau pour les visualisations + df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)[ + "Volume" + ].sum() + df_totals_sorted = df_totals_sorted.sort_values(["Volume"], 
ascending=False) + + # Charte graphique MERTERRE : + colors_map = { + "Textile": "#C384B1", + "Papier": "#CAA674", + "Metal": "#A0A0A0", + "Verre": "#3DCE89", + "Autre": "#F3B900", + "Plastique": "#48BEF0", + "Caoutchouc": "#364E74", + "Bois": "#673C11", + "Papier/Carton": "#CAA674", + "Métal": "#A0A0A0", + "Verre/Céramique": "#3DCE89", + "Autre": "#F3B900", + } + + # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + volume_total = f"{volume_total:,.0f}".replace(",", " ") + cell1.metric("Volume de déchets collectés", f"{volume_total} litres") + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + poids_total = f"{poids_total:,.0f}".replace(",", " ") + + cell2.metric("Poids total collecté", f"{poids_total} kg") + + # 3ème métrique : nombre de relevés + cell3 = l1_col3.container(border=True) + nb_collectes = f"{nb_collectes_int:,.0f}".replace(",", " ") + cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") + + # Message d'avertissement nb de collectes en dessous de 5 + if nb_collectes_int == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(nb_collectes_int) + + " collecte considérées dans les données présentées." + ) + elif nb_collectes_int <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(nb_collectes_int) + + " collectes considérées dans les données présentées." 
+ ) + + # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux + + l2_col1, l2_col2 = st.columns(2) + cell4 = l2_col1.container(border=True) + cell5 = l2_col2.container(border=True) + with cell4: + + # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance + fig = px.pie( + df_totals_sorted, + values="Volume", + names="Matériau", + title="Répartition des matériaux en volume", + hole=0.4, + color="Matériau", + color_discrete_map=colors_map, + ) + + # Réglage du texte affiché, format et taille de police + fig.update_traces( + textinfo="percent", + texttemplate="%{percent:.0%}", + textfont_size=14, + ) + fig.update_layout(autosize=True, legend_title_text="Matériau") + + # Affichage du graphique + st.plotly_chart(fig, use_container_width=True) + + with cell5: + # Création du graphique en barres avec Plotly Express + fig2 = px.bar( + df_totals_sorted, + x="Matériau", + y="Volume", + text="Volume", + title="Volume total par materiau (en litres)", + color="Matériau", + color_discrete_map=colors_map, + ) + + # Amélioration du graphique + fig2.update_traces( + texttemplate="%{text:.2s}", + textposition="inside", + textfont_size=14, + ) + fig2.update_layout( + autosize=True, + # uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=-45, + showlegend=False, + yaxis_showgrid=False, + xaxis_title=None, + yaxis_title=None, + ) + + # Affichage du graphique + st.plotly_chart(fig2, use_container_width=True) + + st.write("") + st.caption( + f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." 
) - fig.update_layout(autosize=True, legend_title_text="Matériau") - # Affichage du graphique - st.plotly_chart(fig, use_container_width=True) + # Ligne 3 : Graphe par milieu de collecte - with cell5: - # Création du graphique en barres avec Plotly Express - fig2 = px.bar( - df_totals_sorted, - x="Matériau", + # Grouper par année et type de matériau + df_typemilieu = df_volume.groupby(["TYPE_MILIEU", "Matériau"], as_index=False)[ + "Volume" + ].sum() + df_typemilieu = df_typemilieu.sort_values( + ["TYPE_MILIEU", "Volume"], ascending=False + ) + + # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau + fig3 = px.histogram( + df_typemilieu, + x="TYPE_MILIEU", y="Volume", - text="Volume", - title="Volume total par materiau (en litres)", color="Matériau", + barnorm="percent", + title="Part de chaque matériau en volume selon le milieu de collecte", color_discrete_map=colors_map, + text_auto=True, ) - - # Amélioration du graphique - fig2.update_traces( - texttemplate="%{text:.2s}", + # Format d'affichage + fig3.update_layout( + bargap=0.2, + height=600, + yaxis_title="Part du volume collecté (en %)", + xaxis_title=None, + ) + fig3.update_xaxes(tickangle=-30) + # Etiquettes et formats de nombres + fig3.update_traces( + texttemplate="%{y:.0f}%", textposition="inside", + hovertemplate="%{x}
Part du volume collecté dans ce milieu: %{y:.0f} %", textfont_size=14, ) - fig2.update_layout( - autosize=True, - # uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=-45, - showlegend=False, - yaxis_showgrid=False, - xaxis_title=None, - yaxis_title=None, - ) - - # Affichage du graphique - st.plotly_chart(fig2, use_container_width=True) - - st.write("") - st.caption( - f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." - ) - - # Ligne 3 : Graphe par milieu de collecte - # Grouper par année et type de matériau - df_typemilieu = df_volume.groupby(["TYPE_MILIEU", "Matériau"], as_index=False)[ - "Volume" - ].sum() - df_typemilieu = df_typemilieu.sort_values( - ["TYPE_MILIEU", "Volume"], ascending=False - ) - - # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau - fig3 = px.histogram( - df_typemilieu, - x="TYPE_MILIEU", - y="Volume", - color="Matériau", - barnorm="percent", - title="Part de chaque matériau en volume selon le milieu de collecte", - color_discrete_map=colors_map, - text_auto=True, - ) - # Format d'affichage - fig3.update_layout( - bargap=0.2, - height=600, - yaxis_title="Part du volume collecté (en %)", - xaxis_title=None, - ) - fig3.update_xaxes(tickangle=-30) - # Etiquettes et formats de nombres - fig3.update_traces( - texttemplate="%{y:.0f}%", - textposition="inside", - hovertemplate="%{x}
Part du volume collecté dans ce milieu: %{y:.0f} %", - textfont_size=14, - ) - - # Afficher le graphique - with st.container(border=True): - st.plotly_chart(fig3, use_container_width=True) - - # Ligne 3 : Graphe par milieu , lieu et année - st.write("**Détail par milieu, lieu ou année**") - - # Étape 1: Création des filtres - - df_other_metrics = df_other_metrics_raw.copy() - df_other_metrics = df_other_metrics.fillna(0) + # Afficher le graphique + with st.container(border=True): + st.plotly_chart(fig3, use_container_width=True) - selected_annee = st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), - ) - if selected_annee != "Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee].copy() - filtered_metrics_milieu = df_other_metrics[ - df_other_metrics["ANNEE"] == selected_annee - ].copy() - else: - filtered_data_milieu = df_other.copy() - filtered_metrics_milieu = df_other_metrics.copy() + # Ligne 3 : Graphe par milieu , lieu et année + st.write("**Détail par milieu, lieu ou année**") - selected_type_milieu = st.selectbox( - "Choisir un type de milieu:", - options=["Aucune sélection"] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), - ) + # Étape 1: Création des filtres - if selected_type_milieu != "Aucune sélection": - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu - ] - filtered_metrics_milieu = filtered_metrics_milieu[ - filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu - ] - else: - filtered_data_lieu = filtered_data_milieu.copy() - filtered_metrics_milieu = df_other_metrics.copy() + df_other_metrics = df_other_metrics_raw.copy() + df_other_metrics = df_other_metrics.fillna(0) - selected_type_lieu = st.selectbox( - "Choisir un type de lieu:", - options=["Aucune sélection"] + list(filtered_data_lieu["TYPE_LIEU"].unique()), - ) + selected_annee = st.selectbox( + "Choisir une année:", + 
options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), + ) + if selected_annee != "Aucune sélection": + filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee].copy() + filtered_metrics_milieu = df_other_metrics[ + df_other_metrics["ANNEE"] == selected_annee + ].copy() + else: + filtered_data_milieu = df_other.copy() + filtered_metrics_milieu = df_other_metrics.copy() + + selected_type_milieu = st.selectbox( + "Choisir un type de milieu:", + options=["Aucune sélection"] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + ) - if ( - selected_annee == "Aucune sélection" - and selected_type_milieu == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - ): - df_filtered = df_other.copy() - df_filtered_metrics = df_other_metrics_raw.copy() - elif ( - selected_type_milieu == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["ANNEE"] == selected_annee - ].copy() - elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - and selected_type_milieu != "Aucune sélection" - ): - df_filtered = df_other[df_other["TYPE_MILIEU"] == selected_type_milieu].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["TYPE_MILIEU"] == selected_type_milieu - ].copy() - - elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu == "Aucune sélection" - ): - df_filtered = df_other[df_other["TYPE_LIEU"] == selected_type_lieu].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["TYPE_LIEU"] == selected_type_lieu - ].copy() - - elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu != "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["TYPE_LIEU"] == selected_type_lieu) - & 
(df_other["TYPE_MILIEU"] == selected_type_milieu) - ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - ] - elif ( - selected_annee != "Aucune sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu == "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_LIEU"] == selected_type_lieu) - ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - ] - elif ( - selected_annee != "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - and selected_type_milieu != "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - ] + if selected_type_milieu != "Aucune sélection": + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu + ] + filtered_metrics_milieu = filtered_metrics_milieu[ + filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu + ] + else: + filtered_data_lieu = filtered_data_milieu.copy() + filtered_metrics_milieu = df_other_metrics.copy() + + selected_type_lieu = st.selectbox( + "Choisir un type de lieu:", + options=["Aucune sélection"] + + list(filtered_data_lieu["TYPE_LIEU"].unique()), + ) - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - & (df_other["TYPE_LIEU"] == selected_type_lieu) - ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - & 
(df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - ] + if ( + selected_annee == "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + ): + df_filtered = df_other.copy() + df_filtered_metrics = df_other_metrics_raw.copy() + elif ( + selected_type_milieu == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + ): + df_filtered = df_other[df_other["ANNEE"] == selected_annee].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["ANNEE"] == selected_annee + ].copy() + elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + and selected_type_milieu != "Aucune sélection" + ): + df_filtered = df_other[ + df_other["TYPE_MILIEU"] == selected_type_milieu + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["TYPE_MILIEU"] == selected_type_milieu + ].copy() + + elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + ): + df_filtered = df_other[df_other["TYPE_LIEU"] == selected_type_lieu].copy() + df_filtered_metrics = df_other_metrics_raw[ + df_other_metrics["TYPE_LIEU"] == selected_type_lieu + ].copy() + + elif ( + selected_annee == "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and selected_type_milieu != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["TYPE_LIEU"] == selected_type_lieu) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + ] + elif ( + selected_annee != "Aucune sélection" + and selected_type_lieu != "Aucune sélection" + and selected_type_milieu == "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_LIEU"] == selected_type_lieu) + ].copy() + 
df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + ] + elif ( + selected_annee != "Aucune sélection" + and selected_type_lieu == "Aucune sélection" + and selected_type_milieu != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + ] + + else: + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee) + & (df_other["TYPE_MILIEU"] == selected_type_milieu) + & (df_other["TYPE_LIEU"] == selected_type_lieu) + ].copy() + df_filtered_metrics = df_other_metrics_raw[ + (df_other_metrics["ANNEE"] == selected_annee) + & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + ] + + # Ligne 5 : Metriques filtrés + l5_col1, l5_col2, l5_col3 = st.columns(3) + cell6 = l5_col1.container(border=True) + cell7 = l5_col2.container(border=True) + cell8 = l5_col3.container(border=True) + + poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() + volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() + + volume_total_filtered = f"{volume_total_filtered:,.0f}".replace(",", " ") + cell6.metric("Volume de dechets collectés", f"{volume_total_filtered} litres") + + poids_total_filtered = f"{poids_total_filtered:,.0f}".replace(",", " ") + cell7.metric("Poids total collecté", f"{poids_total_filtered} kg") + + nombre_collectes_filtered = f"{len(df_filtered):,.0f}".replace(",", " ") + cell8.metric("Nombre de collectes", f"{nombre_collectes_filtered}") + + # Message d'avertissement nb de collectes en dessous de 5 + if len(df_filtered) == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(len(df_filtered)) + + " collecte considérées dans les données présentées." 
+ ) + elif len(df_filtered) <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(len(df_filtered)) + + " collectes considérées dans les données présentées." + ) + + # Étape 3: Preparation dataframe pour graphe + # Copie des données pour transfo + df_volume2 = df_filtered.copy() + + # Calcul des indicateurs clés de haut de tableau avant transformation + volume2_total = df_volume2["VOLUME_TOTAL"].sum() + poids2_total = df_volume2["POIDS_TOTAL"].sum() + volume2_total_categorise = df_volume2[cols_volume].sum().sum() + pct_volume2_categorise = volume2_total_categorise / volume2_total + nb_collectes2 = len(df_volume2) + + # estimation du poids categorisée en utilisant pct_volume_categorise + poids2_total_categorise = round(poids2_total * pct_volume2_categorise) + + # Dépivotage du tableau pour avoir une base de données exploitable + df_volume2 = df_volume2.melt( + id_vars=cols_identifiers, + value_vars=cols_volume, + var_name="Matériau", + value_name="Volume", + ) - # Ligne 5 : Metriques filtrés - l5_col1, l5_col2, l5_col3 = st.columns(3) - cell6 = l5_col1.container(border=True) - cell7 = l5_col2.container(border=True) - cell8 = l5_col3.container(border=True) - - poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() - volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() - - volume_total_filtered = f"{volume_total_filtered:,.0f}".replace(",", " ") - cell6.metric("Volume de dechets collectés", f"{volume_total_filtered} litres") - - poids_total_filtered = f"{poids_total_filtered:,.0f}".replace(",", " ") - cell7.metric("Poids total collecté", f"{poids_total_filtered} kg") - - nombre_collectes_filtered = f"{len(df_filtered):,.0f}".replace(",", " ") - cell8.metric("Nombre de collectes", f"{nombre_collectes_filtered}") - - # Message d'avertissement nb de collectes en dessous de 5 - if len(df_filtered) == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(len(df_filtered)) - + " collecte considérées dans les données présentées." 
- ) - elif len(df_filtered) <= 5: - st.warning( - "⚠️ Il n'y a que " - + str(len(df_filtered)) - + " collectes considérées dans les données présentées." - ) - - # Étape 3: Preparation dataframe pour graphe - # Copie des données pour transfo - df_volume2 = df_filtered.copy() - - # Calcul des indicateurs clés de haut de tableau avant transformation - volume2_total = df_volume2["VOLUME_TOTAL"].sum() - poids2_total = df_volume2["POIDS_TOTAL"].sum() - volume2_total_categorise = df_volume2[cols_volume].sum().sum() - pct_volume2_categorise = volume2_total_categorise / volume2_total - nb_collectes2 = len(df_volume2) - - # estimation du poids categorisée en utilisant pct_volume_categorise - poids2_total_categorise = round(poids2_total * pct_volume2_categorise) - - # Dépivotage du tableau pour avoir une base de données exploitable - df_volume2 = df_volume2.melt( - id_vars=cols_identifiers, - value_vars=cols_volume, - var_name="Matériau", - value_name="Volume", - ) + # Nettoyer le nom du Type déchet pour le rendre plus lisible + df_volume2["Matériau"] = ( + df_volume2["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() + ) - # Nettoyer le nom du Type déchet pour le rendre plus lisible - df_volume2["Matériau"] = ( - df_volume2["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() - ) + # Grouper par type de matériau pour les visualisations + df_totals_sorted2 = df_volume2.groupby(["Matériau"], as_index=False)[ + "Volume" + ].sum() + df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) + df_totals_sorted2["Volume_"] = ( + df_totals_sorted2["Volume"] + .apply(lambda x: "{0:,.0f}".format(x)) + .replace(",", " ") + ) - # Grouper par type de matériau pour les visualisations - df_totals_sorted2 = df_volume2.groupby(["Matériau"], as_index=False)["Volume"].sum() - df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) - df_totals_sorted2["Volume_"] = ( - df_totals_sorted2["Volume"] - .apply(lambda x: "{0:,.0f}".format(x)) - 
.replace(",", " ") - ) + # Étape 4: Création du Graphique + + if not df_filtered.empty: + fig4 = px.treemap( + df_totals_sorted2, + path=["Matériau"], + values="Volume", + title="Répartition des matériaux en volume", + color="Matériau", + color_discrete_map=colors_map, + ) + fig4.update_layout( + margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 + ) + fig4.update_traces( + textinfo="label+value", + texttemplate="%{label}
%{value:.0f} litres", + textfont=dict(size=16), + hovertemplate="%{label}
Volume: %{value:.0f}", + ) + + with st.container(border=True): + st.plotly_chart(fig4, use_container_width=True) + + else: + st.write("Aucune donnée à afficher pour les filtres sélectionnés.") + + # Onglet 2 : Top Déchets + with tab2: + + # Préparation des datas pour l'onglet 2 + df_top = df_nb_dechet.copy() + df_top_data_releves = df_other.copy() + + # Calcul du nombre total de déchets catégorisés sur le territoier + nb_total_dechets = df_top[(df_top["type_regroupement"] == "GROUPE")][ + "nb_dechet" + ].sum() + nb_total_dechets = f"{nb_total_dechets:,.0f}".replace(",", " ") + + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers + + # volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") + cell1.metric("Nombre de déchets catégorisés", f"{nb_total_dechets} déchets") + + # 2ème métrique : équivalent volume catégorisé + cell2 = l1_col2.container(border=True) + volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") + cell2.metric( + "Equivalent en volume ", + f"{volume_total_categorise} litres", + ) - # Étape 4: Création du Graphique + # 3ème métrique : nombre de relevés + cell3 = l1_col3.container(border=True) + cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") + + # Message d'avertissement nb de collectes en dessous de 5 + if nb_collectes_int == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(nb_collectes) + + " collecte considérées dans les données présentées." + ) + elif nb_collectes_int <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(nb_collectes) + + " collectes considérées dans les données présentées." 
+ ) + + # Ligne 2 : graphique top déchets + + # Filtration des données pour nb_dechets + df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") + # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement + df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] + # Group by 'categorie', sum 'nb_dechet', et top 10 + df_top10_dechets = ( + df_dechets_groupe.groupby("categorie") + .agg({"nb_dechet": "sum"}) + .sort_values(by="nb_dechet", ascending=False) + .head(10) + ) + # recuperation de ces 10 dechets dans une liste pour filtration bubble map + noms_top10_dechets = df_top10_dechets.index.tolist() + # Preparation des datas pour l'onglet 3# ajout de la colonne materiau + df_top10_dechets = df_top10_dechets.merge( + df_dict_corr_dechet_materiau, on="categorie", how="left" + ) + # Preparation de la figure barplot + df_top10_dechets.reset_index(inplace=True) + # Création du graphique en barres avec Plotly Express - if not df_filtered.empty: - fig4 = px.treemap( - df_totals_sorted2, - path=["Matériau"], - values="Volume", - title="Répartition des matériaux en volume", - color="Matériau", + fig5 = px.bar( + df_top10_dechets, + y="categorie", + x="nb_dechet", + labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, + title="Top 10 dechets ramassés ", + text="nb_dechet", + color="Materiau", color_discrete_map=colors_map, + category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, ) - fig4.update_layout( - margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 + fig5.update_layout(xaxis_type="log") + # Amélioration du visuel du graphique + fig5.update_traces( + # texttemplate="%{text:.2f}", + textposition="inside", + textfont_color="white", + textfont_size=20, ) - fig4.update_traces( - textinfo="label+value", - texttemplate="%{label}
%{value:.0f} litres", - textfont=dict(size=16), - hovertemplate="%{label}
Volume: %{value:.0f}", + fig5.update_layout( + width=1400, + height=900, + uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=90, + legend=dict(x=1, y=0, xanchor="right", yanchor="bottom"), ) - with st.container(border=True): - st.plotly_chart(fig4, use_container_width=True) - - else: - st.write("Aucune donnée à afficher pour les filtres sélectionnés.") - - -# Onglet 2 : Top Déchets -with tab2: - - # Préparation des datas pour l'onglet 2 - df_top = df_nb_dechet.copy() - df_top_data_releves = df_other.copy() - - # Calcul du nombre total de déchets catégorisés sur le territoier - nb_total_dechets = df_top[(df_top["type_regroupement"] == "GROUPE")][ - "nb_dechet" - ].sum() - nb_total_dechets = f"{nb_total_dechets:,.0f}".replace(",", " ") - - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - - # volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") - cell1.metric("Nombre de déchets catégorisés", f"{nb_total_dechets} déchets") - - # 2ème métrique : équivalent volume catégorisé - cell2 = l1_col2.container(border=True) - volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") - cell2.metric( - "Equivalent en volume ", - f"{volume_total_categorise} litres", - ) - - # 3ème métrique : nombre de relevés - cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") - - # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(nb_collectes) - + " collecte considérées dans les données présentées." 
- ) - elif nb_collectes_int <= 5: - st.warning( - "⚠️ Il n'y a que " - + str(nb_collectes) - + " collectes considérées dans les données présentées." - ) - - # Ligne 2 : graphique top déchets - - # Filtration des données pour nb_dechets - df_top10 = pd.merge(df_top, df_top_data_releves, on="ID_RELEVE", how="inner") - # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement - df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] - # Group by 'categorie', sum 'nb_dechet', et top 10 - df_top10_dechets = ( - df_dechets_groupe.groupby("categorie") - .agg({"nb_dechet": "sum"}) - .sort_values(by="nb_dechet", ascending=False) - .head(10) - ) - # recuperation de ces 10 dechets dans une liste pour filtration bubble map - noms_top10_dechets = df_top10_dechets.index.tolist() - # Preparation des datas pour l'onglet 3# ajout de la colonne materiau - df_top10_dechets = df_top10_dechets.merge( - df_dict_corr_dechet_materiau, on="categorie", how="left" - ) - # Preparation de la figure barplot - df_top10_dechets.reset_index(inplace=True) - # Création du graphique en barres avec Plotly Express - - fig5 = px.bar( - df_top10_dechets, - y="categorie", - x="nb_dechet", - labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, - title="Top 10 dechets ramassés ", - text="nb_dechet", - color="Materiau", - color_discrete_map=colors_map, - category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, - ) - fig5.update_layout(xaxis_type="log") - # Amélioration du visuel du graphique - fig5.update_traces( - # texttemplate="%{text:.2f}", - textposition="inside", - textfont_color="white", - textfont_size=20, - ) - fig5.update_layout( - width=1400, - height=900, - uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=90, - legend=dict(x=1, y=0, xanchor="right", yanchor="bottom"), - ) + # Suppression de la colonne categorie + del df_top10_dechets["Materiau"] - # Suppression de la colonne categorie - del 
df_top10_dechets["Materiau"] + with st.container(border=True): + st.plotly_chart(fig5, use_container_width=True) + + st.write("") + st.caption( + f"Note : Analyse basée sur les collectes qui ont fait l'objet d'un comptage détaillé par déchet,\ + soit {volume_total_categorise} litres équivalent à {pct_volume_categorise:.0%} du volume collecté\ + sur le territoire." + ) + with st.container(): + # Ajout de la selectbox + selected_dechet = st.selectbox( + "Choisir un type de déchet :", noms_top10_dechets, index=0 + ) + + # Filtration sur le dechet top 10 sélectionné + df_top_map = df_top[df_top["categorie"] == selected_dechet] + + # Création du DataFrame de travail pour la carte + df_map_data = pd.merge( + df_top_map, df_top_data_releves, on="ID_RELEVE", how="inner" + ) + + # Création de la carte centrée autour d'une localisation + # Calcul des limites à partir de vos données + min_lat = df_map_data["LIEU_COORD_GPS_Y"].min() + max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() + min_lon = df_map_data["LIEU_COORD_GPS_X"].min() + max_lon = df_map_data["LIEU_COORD_GPS_X"].max() + + map_data = folium.Map( + location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], + zoom_start=8, + tiles="OpenStreetMap", + ) + + # Facteur de normalisation pour ajuster la taille des bulles + normalisation_facteur = 1000 + + for index, row in df_map_data.iterrows(): + # Application de la normalisation + radius = row["nb_dechet"] / normalisation_facteur + + # Application d'une limite minimale pour le rayon si nécessaire + radius = max(radius, 5) + + folium.CircleMarker( + location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), + radius=radius, # Utilisation du rayon ajusté + popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", + color="#3186cc", + fill=True, + fill_color="#3186cc", + ).add_to(map_data) + + # Affichage de la carte Folium dans Streamlit + st_folium = st.components.v1.html + st_folium( + 
folium.Figure().add_child(map_data).render(), # , width=1400 + height=750, + ) + + # Onglet 3 : Secteurs et marques + with tab3: + st.write("") - with st.container(border=True): - st.plotly_chart(fig5, use_container_width=True) + # Préparation des données + df_dechet_copy = df_nb_dechet.copy() + df_filtre_copy = df_other.copy() - st.write("") - st.caption( - f"Note : Analyse basée sur les collectes qui ont fait l'objet d'un comptage détaillé par déchet,\ - soit {volume_total_categorise} litres équivalent à {pct_volume_categorise:.0%} du volume collecté\ - sur le territoire." + # Étape 1: Création des filtres + selected_annee_onglet_3 = st.selectbox( + "Choisir une année:", + options=["Aucune sélection"] + + list(df_other["ANNEE"].sort_values().unique()), + key="année_select", ) - with st.container(): - # Ajout de la selectbox - selected_dechet = st.selectbox( - "Choisir un type de déchet :", noms_top10_dechets, index=0 + if selected_annee_onglet_3 != "Aucune sélection": + filtered_data_milieu = df_other[ + df_other["ANNEE"] == selected_annee_onglet_3 + ] + else: + filtered_data_milieu = df_other.copy() + + selected_type_milieu_onglet_3 = st.selectbox( + "Choisir un type de milieu:", + options=["Aucune sélection"] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + key="type_milieu_select", ) - # Filtration sur le dechet top 10 sélectionné - df_top_map = df_top[df_top["categorie"] == selected_dechet] - - # Création du DataFrame de travail pour la carte - df_map_data = pd.merge( - df_top_map, df_top_data_releves, on="ID_RELEVE", how="inner" + if selected_type_milieu_onglet_3 != "Aucune sélection": + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + ] + else: + filtered_data_lieu = filtered_data_milieu + + selected_type_lieu_onglet_3 = st.selectbox( + "Choisir un type de lieu:", + options=["Aucune sélection"] + + list(filtered_data_lieu["TYPE_LIEU"].unique()), + key="type_lieu_select", ) - # 
Création de la carte centrée autour d'une localisation - # Calcul des limites à partir de vos données - min_lat = df_map_data["LIEU_COORD_GPS_Y"].min() - max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() - min_lon = df_map_data["LIEU_COORD_GPS_X"].min() - max_lon = df_map_data["LIEU_COORD_GPS_X"].max() - - map_data = folium.Map( - location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], - zoom_start=8, - tiles="OpenStreetMap", + if ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other.copy() + elif ( + selected_type_milieu_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + ].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[ + df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 + ].copy() + elif ( + selected_annee_onglet_3 == "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ].copy() + elif ( + selected_annee_onglet_3 != "Aucune sélection" + and selected_type_lieu_onglet_3 != "Aucune sélection" + and selected_type_milieu_onglet_3 == "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_LIEU"] == 
selected_type_lieu_onglet_3) + ].copy() + elif ( + selected_annee_onglet_3 != "Aucune sélection" + and selected_type_lieu_onglet_3 == "Aucune sélection" + and selected_type_milieu_onglet_3 != "Aucune sélection" + ): + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ].copy() + + elif selected_type_lieu_onglet_3 == "Aucune sélection": + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + ].copy() + else: + df_filtered = df_other[ + (df_other["ANNEE"] == selected_annee_onglet_3) + & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + ].copy() + + # Filtration des données pour nb_dechets + df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") + + # Data pour le plot secteur + secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] + top_secteur_df = ( + secteur_df.groupby("categorie")["nb_dechet"] + .sum() + .sort_values(ascending=True) ) - - # Facteur de normalisation pour ajuster la taille des bulles - normalisation_facteur = 1000 - - for index, row in df_map_data.iterrows(): - # Application de la normalisation - radius = row["nb_dechet"] / normalisation_facteur - - # Application d'une limite minimale pour le rayon si nécessaire - radius = max(radius, 5) - - folium.CircleMarker( - location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), - radius=radius, # Utilisation du rayon ajusté - popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", - color="#3186cc", - fill=True, - fill_color="#3186cc", - ).add_to(map_data) - - # Affichage de la carte Folium dans Streamlit - st_folium = st.components.v1.html - st_folium( - folium.Figure().add_child(map_data).render(), # , width=1400 - height=750, + top_secteur_df = top_secteur_df.reset_index() + 
top_secteur_df.columns = ["Secteur", "Nombre de déchets"] + + # Data pour le plot marque + marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] + top_marque_df = ( + marque_df.groupby("categorie")["nb_dechet"] + .sum() + .sort_values(ascending=True) ) + top_marque_df = top_marque_df.reset_index() + top_marque_df.columns = ["Marque", "Nombre de déchets"] + # Chiffres clés + nb_dechet_secteur = secteur_df["nb_dechet"].sum() + nb_secteurs = len(top_secteur_df["Secteur"].unique()) -# Onglet 3 : Secteurs et marques -with tab3: - st.write("") - - # Préparation des données - df_dechet_copy = df_nb_dechet.copy() - df_filtre_copy = df_other.copy() - - # Étape 1: Création des filtres - selected_annee_onglet_3 = st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].sort_values().unique()), - key="année_select", - ) - if selected_annee_onglet_3 != "Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee_onglet_3] - else: - filtered_data_milieu = df_other.copy() - - selected_type_milieu_onglet_3 = st.selectbox( - "Choisir un type de milieu:", - options=["Aucune sélection"] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), - key="type_milieu_select", - ) - - if selected_type_milieu_onglet_3 != "Aucune sélection": - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - ] - else: - filtered_data_lieu = filtered_data_milieu - - selected_type_lieu_onglet_3 = st.selectbox( - "Choisir un type de lieu:", - options=["Aucune sélection"] + list(filtered_data_lieu["TYPE_LIEU"].unique()), - key="type_lieu_select", - ) - - if ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other.copy() - elif ( - selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" 
- ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() - elif ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" - ): - df_filtered = df_other[ - df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - ].copy() - elif ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other[ - df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 - ].copy() - elif ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - elif ( - selected_annee_onglet_3 != "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - ].copy() - elif ( - selected_annee_onglet_3 != "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - - elif selected_type_lieu_onglet_3 == "Aucune sélection": - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - & (df_other["TYPE_LIEU"] == 
selected_type_lieu_onglet_3) - ].copy() - - # Filtration des données pour nb_dechets - df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") - - # Data pour le plot secteur - secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] - top_secteur_df = ( - secteur_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) - ) - top_secteur_df = top_secteur_df.reset_index() - top_secteur_df.columns = ["Secteur", "Nombre de déchets"] - - # Data pour le plot marque - marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] - top_marque_df = ( - marque_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) - ) - top_marque_df = top_marque_df.reset_index() - top_marque_df.columns = ["Marque", "Nombre de déchets"] - - # Chiffres clés - nb_dechet_secteur = secteur_df["nb_dechet"].sum() - nb_secteurs = len(top_secteur_df["Secteur"].unique()) - - nb_dechet_marque = marque_df["nb_dechet"].sum() - nb_marques = len(top_marque_df["Marque"].unique()) - collectes = len(df_filtered) + nb_dechet_marque = marque_df["nb_dechet"].sum() + nb_marques = len(top_marque_df["Marque"].unique()) + collectes = len(df_filtered) - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) + l1_col1, l1_col2, l1_col3 = st.columns(3) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") - cell1.metric( - "Nombre de déchets 
catégorisés par secteur", f"{nb_dechet_secteur} dechets" - ) + # Trick pour séparer les milliers + nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") + cell1.metric( + "Nombre de déchets catégorisés par secteur", f"{nb_dechet_secteur} dechets" + ) - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - nb_secteurs = f"{nb_secteurs:,.0f}".replace(",", " ") - # poids_total = f"{poids_total:,.0f}".replace(",", " ") - cell2.metric( - "Nombre de secteurs identifiés lors des collectes", - f"{nb_secteurs} secteurs", - ) + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + nb_secteurs = f"{nb_secteurs:,.0f}".replace(",", " ") + # poids_total = f"{poids_total:,.0f}".replace(",", " ") + cell2.metric( + "Nombre de secteurs identifiés lors des collectes", + f"{nb_secteurs} secteurs", + ) - # 3ème métrique : nombre de collectes - cell3 = l1_col3.container(border=True) - collectes_formatted = f"{collectes:,.0f}".replace(",", " ") - cell3.metric( - "Nombre de collectes comptabilisées", - f"{collectes_formatted} collectes", - ) + # 3ème métrique : nombre de collectes + cell3 = l1_col3.container(border=True) + collectes_formatted = f"{collectes:,.0f}".replace(",", " ") + cell3.metric( + "Nombre de collectes comptabilisées", + f"{collectes_formatted} collectes", + ) - # Message d'avertissement nb de collectes en dessous de 5 - if collectes == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(collectes) - + " collecte considérées dans les données présentées." - ) - elif collectes <= 5: - st.warning( - "⚠️ Il n'y a que " - + str(collectes) - + " collectes considérées dans les données présentées." 
- ) - - # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page - colors_map_secteur = { - "AGRICULTURE": "#156644", - "ALIMENTATION": "#F7D156", - "AMEUBLEMENT, DÉCORATION ET ÉQUIPEMENT DE LA MAISON": "#F79D65", - "AQUACULTURE": "#0067C2", - "BÂTIMENT, TRAVAUX ET MATÉRIAUX DE CONSTRUCTION": "#FF9900", - "CHASSE ET ARMEMENT": "#23A76F", - "COSMÉTIQUES, HYGIÈNE ET SOINS PERSONNELS": "#BF726B", - "DÉTERGENTS ET PRODUITS D'ENTRETIENS": "#506266", - "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", - "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", - "INDÉTERMINÉ": "#967EA1", - "INFORMATIQUE ET HIGHTECH": "#E351F7", - "JOUETS ET LOISIR": "#A64D79", - "MATÉRIEL ÉLECTRIQUE ET ÉLECTROMÉNAGER": "#AE05C3", - "MÉTALLURGIE": "#EC4773", - "PÊCHE": "#003463", - "PETROCHIMIE": "#0D0D0D", - "PHARMACEUTIQUE/PARAMÉDICAL": "#61BF5E", - "PLASTURGIE": "#05A2AD", - "TABAC": "#E9003F", - "TEXTILE ET HABILLEMENT": "#FA9EE5", - "TRAITEMENT DES EAUX": "#4AA6F7", - "TRANSPORT / AUTOMOBILE": "#6C2775", - "VAISSELLE À USAGE UNIQUE": "#732D3A", - "AUTRES SECTEURS": "#D9C190", - } - - fig_secteur = px.bar( - top_secteur_df.tail(10).sort_values(by="Nombre de déchets", ascending=False), - x="Nombre de déchets", - y="Secteur", - color="Secteur", - title="Top 10 des secteurs les plus ramassés", - orientation="h", - color_discrete_map=colors_map_secteur, - text_auto=True, - ) - # add log scale to x axis - fig_secteur.update_layout(xaxis_type="log") - fig_secteur.update_traces(texttemplate="%{value:.0f}", textposition="inside") - fig_secteur.update_layout( - width=800, - height=500, - uniformtext_mode="hide", - showlegend=False, - yaxis_title=None, - ) - with st.container(border=True): - st.plotly_chart(fig_secteur, use_container_width=True) + # Message d'avertissement nb de collectes en dessous de 5 + if collectes == 1: + st.warning( + "⚠️ Il n'y a qu' " + + str(collectes) + + " collecte considérées dans les données présentées." 
+ ) + elif collectes <= 5: + st.warning( + "⚠️ Il n'y a que " + + str(collectes) + + " collectes considérées dans les données présentées." + ) + + # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page + colors_map_secteur = { + "AGRICULTURE": "#156644", + "ALIMENTATION": "#F7D156", + "AMEUBLEMENT, DÉCORATION ET ÉQUIPEMENT DE LA MAISON": "#F79D65", + "AQUACULTURE": "#0067C2", + "BÂTIMENT, TRAVAUX ET MATÉRIAUX DE CONSTRUCTION": "#FF9900", + "CHASSE ET ARMEMENT": "#23A76F", + "COSMÉTIQUES, HYGIÈNE ET SOINS PERSONNELS": "#BF726B", + "DÉTERGENTS ET PRODUITS D'ENTRETIENS": "#506266", + "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", + "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", + "INDÉTERMINÉ": "#967EA1", + "INFORMATIQUE ET HIGHTECH": "#E351F7", + "JOUETS ET LOISIR": "#A64D79", + "MATÉRIEL ÉLECTRIQUE ET ÉLECTROMÉNAGER": "#AE05C3", + "MÉTALLURGIE": "#EC4773", + "PÊCHE": "#003463", + "PETROCHIMIE": "#0D0D0D", + "PHARMACEUTIQUE/PARAMÉDICAL": "#61BF5E", + "PLASTURGIE": "#05A2AD", + "TABAC": "#E9003F", + "TEXTILE ET HABILLEMENT": "#FA9EE5", + "TRAITEMENT DES EAUX": "#4AA6F7", + "TRANSPORT / AUTOMOBILE": "#6C2775", + "VAISSELLE À USAGE UNIQUE": "#732D3A", + "AUTRES SECTEURS": "#D9C190", + } + + fig_secteur = px.bar( + top_secteur_df.tail(10).sort_values( + by="Nombre de déchets", ascending=False + ), + x="Nombre de déchets", + y="Secteur", + color="Secteur", + title="Top 10 des secteurs les plus ramassés", + orientation="h", + color_discrete_map=colors_map_secteur, + text_auto=True, + ) + # add log scale to x axis + fig_secteur.update_layout(xaxis_type="log") + fig_secteur.update_traces(texttemplate="%{value:.0f}", textposition="inside") + fig_secteur.update_layout( + width=800, + height=500, + uniformtext_mode="hide", + showlegend=False, + yaxis_title=None, + ) + with st.container(border=True): + st.plotly_chart(fig_secteur, use_container_width=True) - l1_col1, l1_col2 = st.columns(2) - cell1 = l1_col1.container(border=True) + l1_col1, l1_col2 = 
st.columns(2) + cell1 = l1_col1.container(border=True) - # Trick pour séparer les milliers - nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") - cell1.metric( - "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" - ) + # Trick pour séparer les milliers + nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") + cell1.metric( + "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" + ) - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - nb_marques = f"{nb_marques:,.0f}".replace(",", " ") - cell2.metric( - "Nombre de marques identifiés lors des collectes", - f"{nb_marques} marques", - ) + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + nb_marques = f"{nb_marques:,.0f}".replace(",", " ") + cell2.metric( + "Nombre de marques identifiés lors des collectes", + f"{nb_marques} marques", + ) - fig_marque = px.bar( - top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), - x="Nombre de déchets", - y="Marque", - title="Top 10 des marques les plus ramassées", - color_discrete_sequence=["#1951A0"], - orientation="h", - text_auto=False, - text=top_marque_df.tail(10)["Marque"] - + ": " - + top_marque_df.tail(10)["Nombre de déchets"].astype(str), - ) - # add log scale to x axis - fig_marque.update_layout(xaxis_type="log") - # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") - - fig_marque.update_layout( - width=800, - height=500, - uniformtext_minsize=8, - uniformtext_mode="hide", - yaxis_title=None, - ) + fig_marque = px.bar( + top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), + x="Nombre de déchets", + y="Marque", + title="Top 10 des marques les plus ramassées", + color_discrete_sequence=["#1951A0"], + orientation="h", + text_auto=False, + text=top_marque_df.tail(10)["Marque"] + + ": " + + top_marque_df.tail(10)["Nombre de déchets"].astype(str), + ) + # add log scale to x axis + 
fig_marque.update_layout(xaxis_type="log") + # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") + + fig_marque.update_layout( + width=800, + height=500, + uniformtext_minsize=8, + uniformtext_mode="hide", + yaxis_title=None, + ) - with st.container(border=True): - st.plotly_chart(fig_marque, use_container_width=True) + with st.container(border=True): + st.plotly_chart(fig_marque, use_container_width=True) +else: + st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index e75f8b9..2314da2 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -5,3 +5,6 @@ duckdb==0.10.0 streamlit==1.32.2 streamlit-folium==0.19.1 plotly==5.19.0 +streamlit-dynamic-filters==0.1.6 +streamlit-authenticator==0.3.2 +st-pages==0.4.5 From 8853ade6555183749df0cc18d389856ee22f9f5d Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 19 Apr 2024 17:01:05 -0400 Subject: [PATCH 051/147] =?UTF-8?q?[kb]=20=F0=9F=9A=91=20Add=20authenticat?= =?UTF-8?q?ion?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 247 +++++++++---- dashboards/app/pages/actions.py | 623 ++++++++++++++++---------------- dashboards/app/requirements.txt | 3 + 3 files changed, 482 insertions(+), 391 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index f557554..00c5d07 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,97 +1,184 @@ +from pathlib import Path + import pandas as pd import streamlit as st - -st.markdown( - """ -# Bienvenue 👋 -#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
-""", +import streamlit_authenticator as stauth +import yaml +from st_pages import Page, show_pages +from yaml.loader import SafeLoader + +# Configuration de la page +st.set_page_config( + layout="wide", + page_title="Dashboard Zéro Déchet Sauvage", + page_icon=":dolphin:", + menu_items={ + "About": "https://www.zero-dechet-sauvage.org/", + }, ) -st.markdown("""# À propos""") +# load and apply CSS styles +def load_css(file_name: str) -> None: + with Path(file_name).open() as f: + st.markdown(f"", unsafe_allow_html=True) -# Chargement des données géographiques pour le filtre : une seule fois à l'arrivée -@st.cache_data -def load_df_other() -> pd.DataFrame: - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv", - ) - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE - # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - return df - - -# Appel des fonctions pour charger les données - -df_other = load_df_other() - - -# Création du filtre par niveau géographique : correspondance labels et variables du dataframe -niveaux_admin_dict = { - "Région": "REGION", - "Département": "DEP_CODE_NOM", - "EPCI": "LIBEPCI", - "Commune": "COMMUNE_CODE_NOM", -} - -# 1ère étape : sélection du niveau administratif concerné (région, dép...) 
-# Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment -# Récupérer les index pour conserver la valeur des filtres au changement de pages -# Filtre niveau administratif -niveau_admin = st.session_state.get("niveau_admin", None) -index_admin = st.session_state.get("index_admin", None) -# Filtre collectivité -collectivite = st.session_state.get("collectivite", None) -index_collec = st.session_state.get("index_collec", None) - -# Initialiser la selectbox avec l'index récupéré -select_niveauadmin = st.selectbox( - "Niveau administratif : ", - niveaux_admin_dict.keys(), - index=index_admin, +# Login +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) +authenticator.login( + fields={ + "Form name": "Connexion", + "Username": "Identifiant", + "Password": "Mot de passe", + "Login": "Connexion", + }, ) -if select_niveauadmin is not None: - # Filtrer la liste des collectivités en fonction du niveau admin - liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] - liste_collectivites = liste_collectivites.sort_values().unique() - # 2ème filtre : sélection de la collectivité concernée - select_collectivite = st.selectbox( - "Collectivité : ", - liste_collectivites, - index=index_collec, +if st.session_state["authentication_status"]: + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", "Hotspots", "🔥"), + Page("pages/structures.py", "Structures", "🔭"), + ], ) + # Load and apply the CSS file at the start of your app + # local debug + load_css("style.css") -if st.button("Enregistrer la sélection"): - # Enregistrer les valeurs sélectionnées dans le session.state - 
st.session_state["niveau_admin"] = select_niveauadmin - st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( - select_niveauadmin, + st.markdown( + """ + # Bienvenue 👋 + #### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! + """, ) - st.session_state["collectivite"] = select_collectivite - st.session_state["index_collec"] = list(liste_collectivites).index( - select_collectivite, + st.markdown("""# À propos""") + + # Chargement des données et filtre géographique à l'arrivée sur le dashboard + # Table des volumes par matériaux + @st.cache_data + def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + # Table du nb de déchets + @st.cache_data + def load_df_nb_dechet() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv", + ) + + # Appel des fonctions pour charger les données + + df_other = load_df_other() + df_nb_dechets = load_df_nb_dechet() + + # Création du filtre par niveau géographique : correspondance labels et variables du df + niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", + } + + # 1ère étape : sélection du niveau administratif concerné (région, dép...) 
+ # Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment + # Récupérer les index pour conserver la valeur des filtres au changement de pages + # Filtre niveau administratif + niveau_admin = st.session_state.get("niveau_admin", None) + index_admin = st.session_state.get("index_admin", None) + # Filtre collectivité + collectivite = st.session_state.get("collectivite", None) + index_collec = st.session_state.get("index_collec", None) + + # Initialiser la selectbox avec l'index récupéré + select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=index_admin, ) - # Afficher la collectivité sélectionnée - st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") - - # Filtrer et enregistrer le DataFrame dans un "session state" pour les onglets suivants - colonne_filtre = niveaux_admin_dict[select_niveauadmin] - st.session_state["df_other_filtre"] = df_other[ - df_other[colonne_filtre] == select_collectivite - ] - - nb_releves = len(st.session_state["df_other_filtre"]) - st.write( - f"{nb_releves} relevés de collecte disponibles \ - pour l'analyse sur votre territoire.", + if select_niveauadmin is not None: + # Filtrer la liste des collectivités en fonction du niveau admin + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + # 2ème filtre : sélection de la collectivité concernée + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=index_collec, + ) + + if st.button("Enregistrer la sélection"): + # Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, + ) + + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + 
select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + + # Filtrer et enregistrer le DataFrame dans un session state pour la suite + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] + st.session_state["df_other_filtre"] = df_other_filtre + + # Filtrer et enregistrer le dataframe nb_dechets dans session.State + # Récuperer la liste des relevés + id_releves = df_other_filtre["ID_RELEVE"].unique() + # Filtrer df_nb_dechets sur la liste des relevés + st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ + df_nb_dechets["ID_RELEVE"].isin(id_releves) + ] + + # Afficher le nombre de relevés disponibles + nb_releves = len(st.session_state["df_other_filtre"]) + st.write( + f"{nb_releves} relevés de collecte sont disponibles \ + pour l'analyse sur votre territoire.", + ) + + authenticator.logout() +elif st.session_state["authentication_status"] is False: + st.error("Mauvais identifiants ou mot de passe.") +elif st.session_state["authentication_status"] is None: + st.warning("Veuillez entrer votre identifiant et mot de passe") + + show_pages( + [ + Page("home.py", "Home", "🏠 "), + Page("pages/register.py", "S'enregistrer", "🚀"), + ], ) diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index ea669a4..c38c02d 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -16,334 +16,335 @@ filtre_niveau = st.session_state.get("niveau_admin", "") filtre_collectivite = st.session_state.get("collectivite", "") -# Définition d'une fonction pour charger les données du nombre de déchets -@st.cache_data -def load_df_dict_corr_dechet_materiau(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" - "chet_groupe_materiau.csv" - ) - - 
-@st.cache_data -def load_df_nb_dechet(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" - ) - - -# Définition d'une fonction pour charger les autres données -@st.cache_data -def load_df_other(): - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" - ) - - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - - return df +if st.session_state["authentication_status"]: + # Définition d'une fonction pour charger les données du nombre de déchets + @st.cache_data + def load_df_dict_corr_dechet_materiau(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" + "chet_groupe_materiau.csv" + ) + @st.cache_data + def load_df_nb_dechet(): + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv" + ) -# Appel des fonctions pour charger les données + # Définition d'une fonction pour charger les autres données + @st.cache_data + def load_df_other(): + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv" + ) -# Appeler les dataframes volumes et nb_dechets filtré depuis le session state -if "df_other_filtre" not in st.session_state: - st.write( - """ - ### :warning: Merci de sélectionner une 
collectivité\ - dans l'onglet Home pour afficher les données. :warning: - """ - ) - st.stop() -else: - df_other = st.session_state["df_other_filtre"].copy() + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] -# Titre de l'onglet -st.markdown( - """# 🔎 Actions -Quels sont les actions mises en place par les acteurs ? -""" -) + return df -# 2 Onglets : Evènements, Evènements à venir -tab1, tab2 = st.tabs( - [ - "Evènements", - "Evènements à venir", - ] -) + # Appel des fonctions pour charger les données -# Onglet 1 : Evènements -with tab1: - if filtre_niveau == "" and filtre_collectivite == "": + # Appeler les dataframes volumes et nb_dechets filtré depuis le session state + if "df_other_filtre" not in st.session_state: st.write( - "Aucune sélection de territoire n'ayant été effectuée les données sont globales" + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home pour afficher les données. :warning: + """ ) + st.stop() else: - st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") + df_other = st.session_state["df_other_filtre"].copy() - #################### - # @Valerie : J'ai comment pour éviter les errreur - # Les DF sont chargés au dessus comme dans l'onglet DATA - # Je n'ai pas trouvé de référence à 'df_nb_dechets_filtre' dans l'onglet DATA - #################### - - # Appeler les dataframes volumes et nb_dechets filtré depuis le session state - # if ("df_other_filtre" not in st.session_state) or ( - # "df_nb_dechets_filtre" not in st.session_state - # ): - # st.write( - # """ - # ### :warning: Merci de sélectionner une collectivité\ - # dans l'onglet Home pour afficher les données. 
:warning: - # """ - # ) - - # df_nb_dechet = pd.read_csv( - # ( - # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - # "sation/data/data_releve_nb_dechet.csv" - # ) - # ) - - # df_other = pd.read_csv( - # ( - # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - # "sation/data/data_zds_enriched.csv" - # ) - # ) - - # else: - # df_other = st.session_state["df_other_filtre"].copy() - # df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() - - # Copier le df pour la partie filtrée par milieu/lieu/année - df_other_metrics_raw = df_other.copy() - - annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - - # Filtre par année: - options = ["Aucune sélection"] + list(df_other["ANNEE"].unique()) - annee_choisie = st.selectbox("Choisissez l'année:", options, index=0) - - if annee_choisie == "Aucune sélection": - df_other_filtre = df_other.copy() - - if annee_choisie != "Aucune sélection": - df_other_filtre = df_other[df_other["ANNEE"] == annee_choisie].copy() - - # Copie des données pour transfo - df_events = df_other_filtre.copy() - - # Calcul des indicateurs clés de haut de tableau avant transformation - volume_total = df_events["VOLUME_TOTAL"].sum() - poids_total = df_events["POIDS_TOTAL"].sum() - nombre_participants = df_events["NB_PARTICIPANTS"].sum() - nb_collectes = len(df_events) - nombre_structures = df_events["ID_STRUCTURE"].nunique() - - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - - # 1ère métrique : nombre de relevés - cell1 = l1_col1.container(border=True) - nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") - cell1.metric("Nombre de collectes réalisées", f"{nb_collectes}") - - # 2ème métrique : Nombre de Participants - cell2 = l1_col2.container(border=True) - nombre_participants = 
f"{nombre_participants:,.0f}".replace(",", " ") - cell2.metric("Nombre de participants", f"{nombre_participants}") - - # 3ème métrique : Nombre de Structures - cell3 = l1_col3.container(border=True) - nombre_structures = f"{nombre_structures:,.0f}".replace(",", " ") - cell3.metric("Nombre de structures", f"{nombre_structures}") - - # Ligne 2 : Carte - with st.container(): - # Création du DataFrame de travail pour la carte - df_map_evnenements = df_other_filtre.copy() - # Création de la carte centrée autour d'une localisation - # Calcul des limites à partir de vos données - min_lat = df_map_evnenements["LIEU_COORD_GPS_Y"].min() - max_lat = df_map_evnenements["LIEU_COORD_GPS_Y"].max() - min_lon = df_map_evnenements["LIEU_COORD_GPS_X"].min() - max_lon = df_map_evnenements["LIEU_COORD_GPS_X"].max() - - map_evenements = folium.Map( - location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], - zoom_start=8, - tiles="OpenStreetMap", - ) - # Facteur de normalisation pour ajuster la taille des bulles - normalisation_facteur = 100 - for index, row in df_map_evnenements.iterrows(): - # Application de la normalisation - radius = row["NB_PARTICIPANTS"] / normalisation_facteur - - # Application d'une limite minimale pour le rayon si nécessaire - radius = max(radius, 5) - - folium.CircleMarker( - location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), - radius=radius, # Utilisation du rayon ajusté - popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['NOM_EVENEMENT']}, {row['DATE']} : nombre de participants : {row['NB_PARTICIPANTS']}", - color="#3186cc", - fill=True, - fill_color="#3186cc", - ).add_to(map_evenements) - - # Affichage de la carte Folium dans Streamlit - st_folium = st.components.v1.html - st_folium( - folium.Figure().add_child(map_evenements).render(), # , width=1400 - height=750, - ) - - # Ligne 3 : 1 graphique donut chart et un graphique barplot horizontal nombre de relevés par types de milieux - # préparation du dataframe et figure niveaux de 
caracterisation - - df_carac = df_other_filtre.copy() - df_carac_counts = df_carac["NIVEAU_CARAC"].value_counts().reset_index() - df_carac_counts.columns = ["NIVEAU_CARAC", "counts"] - - fig1_actions = px.pie( - df_carac_counts, - values="counts", - names="NIVEAU_CARAC", - title="Répartition des niveaux de caractérisation", - hole=0.5, + # Titre de l'onglet + st.markdown( + """# 🔎 Actions + Quels sont les actions mises en place par les acteurs ? + """ ) - fig1_actions.update_traces(textposition="inside", textinfo="percent+label") - # préparation du dataframe et figure releves types de milieux - - df_milieux = df_other_filtre.copy() - df_milieux_counts = df_milieux["TYPE_MILIEU"].value_counts().reset_index() - df_milieux_counts.columns = ["TYPE_MILIEU", "counts"] - df_milieux_counts_sorted = df_milieux_counts.sort_values( - by="counts", ascending=True + # 2 Onglets : Evènements, Evènements à venir + tab1, tab2 = st.tabs( + [ + "Evènements", + "Evènements à venir", + ] ) - fig2_actions = px.bar( - df_milieux_counts_sorted, - y="TYPE_MILIEU", - x="counts", - title="Nombre de relevés par types de milieux", - text="counts", - orientation="h", - ) - fig2_actions.update_layout(xaxis_title="", yaxis_title="") - - l3_col1, l3_col2 = st.columns(2) - cell4 = l3_col1.container(border=True) - cell5 = l3_col2.container(border=True) - - # Affichage donut - with cell4: - st.plotly_chart(fig1_actions, use_container_width=True) - - # Affichage barplot - with cell5: - st.plotly_chart(fig2_actions, use_container_width=True) - - # Ligne 3 : 2 graphiques en ligne : carte relevés et bar chart matériaux - l3_col1, l3_col2 = st.columns(2) - cell6 = l3_col1.container(border=True) - cell7 = l3_col2.container(border=True) - - # Ligne 4 : 2 graphiques en ligne : bar chart milieux et bar chart types déchets - l4_col1, l4_col2 = st.columns(2) - cell8 = l4_col1.container(border=True) - cell9 = l4_col2.container(border=True) - - # Ligne 5 : 2 graphiques en ligne : line chart volume + nb collectes 
et Pie niveau de caractérisation - l5_col1, l5_col2 = st.columns(2) - cell10 = l5_col1.container(border=True) - cell11 = l5_col2.container(border=True) - - -# onglet Evenements a venir -with tab2: - st.write(f"Votre territoire : Pays - France") - - # Définition d'une fonction pour charger les evenements à venir - @st.cache_data - def load_df_events_clean() -> pd.DataFrame: - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/export_events_cleaned.csv" + # Onglet 1 : Evènements + with tab1: + if filtre_niveau == "" and filtre_collectivite == "": + st.write( + "Aucune sélection de territoire n'ayant été effectuée les données sont globales" + ) + else: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") + + #################### + # @Valerie : J'ai comment pour éviter les errreur + # Les DF sont chargés au dessus comme dans l'onglet DATA + # Je n'ai pas trouvé de référence à 'df_nb_dechets_filtre' dans l'onglet DATA + #################### + + # Appeler les dataframes volumes et nb_dechets filtré depuis le session state + # if ("df_other_filtre" not in st.session_state) or ( + # "df_nb_dechets_filtre" not in st.session_state + # ): + # st.write( + # """ + # ### :warning: Merci de sélectionner une collectivité\ + # dans l'onglet Home pour afficher les données. 
:warning: + # """ + # ) + + # df_nb_dechet = pd.read_csv( + # ( + # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + # "sation/data/data_releve_nb_dechet.csv" + # ) + # ) + + # df_other = pd.read_csv( + # ( + # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + # "sation/data/data_zds_enriched.csv" + # ) + # ) + + # else: + # df_other = st.session_state["df_other_filtre"].copy() + # df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() + + # Copier le df pour la partie filtrée par milieu/lieu/année + df_other_metrics_raw = df_other.copy() + + annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) + + # Filtre par année: + options = ["Aucune sélection"] + list(df_other["ANNEE"].unique()) + annee_choisie = st.selectbox("Choisissez l'année:", options, index=0) + + if annee_choisie == "Aucune sélection": + df_other_filtre = df_other.copy() + + if annee_choisie != "Aucune sélection": + df_other_filtre = df_other[df_other["ANNEE"] == annee_choisie].copy() + + # Copie des données pour transfo + df_events = df_other_filtre.copy() + + # Calcul des indicateurs clés de haut de tableau avant transformation + volume_total = df_events["VOLUME_TOTAL"].sum() + poids_total = df_events["POIDS_TOTAL"].sum() + nombre_participants = df_events["NB_PARTICIPANTS"].sum() + nb_collectes = len(df_events) + nombre_structures = df_events["ID_STRUCTURE"].nunique() + + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + + # 1ère métrique : nombre de relevés + cell1 = l1_col1.container(border=True) + nb_collectes = f"{nb_collectes:,.0f}".replace(",", " ") + cell1.metric("Nombre de collectes réalisées", f"{nb_collectes}") + + # 2ème métrique : Nombre de Participants + cell2 = l1_col2.container(border=True) + nombre_participants = 
f"{nombre_participants:,.0f}".replace(",", " ") + cell2.metric("Nombre de participants", f"{nombre_participants}") + + # 3ème métrique : Nombre de Structures + cell3 = l1_col3.container(border=True) + nombre_structures = f"{nombre_structures:,.0f}".replace(",", " ") + cell3.metric("Nombre de structures", f"{nombre_structures}") + + # Ligne 2 : Carte + with st.container(): + # Création du DataFrame de travail pour la carte + df_map_evnenements = df_other_filtre.copy() + # Création de la carte centrée autour d'une localisation + # Calcul des limites à partir de vos données + min_lat = df_map_evnenements["LIEU_COORD_GPS_Y"].min() + max_lat = df_map_evnenements["LIEU_COORD_GPS_Y"].max() + min_lon = df_map_evnenements["LIEU_COORD_GPS_X"].min() + max_lon = df_map_evnenements["LIEU_COORD_GPS_X"].max() + + map_evenements = folium.Map( + location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], + zoom_start=8, + tiles="OpenStreetMap", + ) + # Facteur de normalisation pour ajuster la taille des bulles + normalisation_facteur = 100 + for index, row in df_map_evnenements.iterrows(): + # Application de la normalisation + radius = row["NB_PARTICIPANTS"] / normalisation_facteur + + # Application d'une limite minimale pour le rayon si nécessaire + radius = max(radius, 5) + + folium.CircleMarker( + location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), + radius=radius, # Utilisation du rayon ajusté + popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['NOM_EVENEMENT']}, {row['DATE']} : nombre de participants : {row['NB_PARTICIPANTS']}", + color="#3186cc", + fill=True, + fill_color="#3186cc", + ).add_to(map_evenements) + + # Affichage de la carte Folium dans Streamlit + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_evenements).render(), # , width=1400 + height=750, + ) + + # Ligne 3 : 1 graphique donut chart et un graphique barplot horizontal nombre de relevés par types de milieux + # préparation du dataframe et figure niveaux de 
caracterisation + + df_carac = df_other_filtre.copy() + df_carac_counts = df_carac["NIVEAU_CARAC"].value_counts().reset_index() + df_carac_counts.columns = ["NIVEAU_CARAC", "counts"] + + fig1_actions = px.pie( + df_carac_counts, + values="counts", + names="NIVEAU_CARAC", + title="Répartition des niveaux de caractérisation", + hole=0.5, ) + fig1_actions.update_traces(textposition="inside", textinfo="percent+label") - # Appel des fonctions pour charger les données - df_events = load_df_events_clean() - - df_events.DATE = pd.to_datetime(df_events.DATE) - - # Filtrer les événements à venir - df_events_a_venir = df_events[df_events.DATE > (datetime.now() - timedelta(days=5))] - - # Trie les events par date - df_events_a_venir.sort_values(by="DATE", inplace=True) - - # Coord approximatives du centre de la France - coord_centre_france = [46.603354, 1.888334] - - # Code couleurs de ZDS - color_ZDS_bleu = "#003463" - color_ZDS_rouge = "#e9003f" - - # Créer la carte - map_events = folium.Map( - location=coord_centre_france, - zoom_start=6, - ) + # préparation du dataframe et figure releves types de milieux - # Ajouter des marqueurs pour chaque événement à venir sur la carte - for idx, row in df_events_a_venir.iterrows(): - folium.Marker( - location=[row.COORD_GPS_Y, row.COORD_GPS_X], - popup=folium.Popup(row.NOM_EVENEMENT, lazy=False), - # tooltip=row.NOM_EVENEMENT, - # icon=folium.Icon(icon_color=color_ZDS_bleu) - ).add_to(map_events) + df_milieux = df_other_filtre.copy() + df_milieux_counts = df_milieux["TYPE_MILIEU"].value_counts().reset_index() + df_milieux_counts.columns = ["TYPE_MILIEU", "counts"] + df_milieux_counts_sorted = df_milieux_counts.sort_values( + by="counts", ascending=True + ) - # Afficher la liste des événements à venir avec la date affichée avant le nom - st.subheader("Actions à venir :") + fig2_actions = px.bar( + df_milieux_counts_sorted, + y="TYPE_MILIEU", + x="counts", + title="Nombre de relevés par types de milieux", + text="counts", + 
orientation="h", + ) + fig2_actions.update_layout(xaxis_title="", yaxis_title="") + + l3_col1, l3_col2 = st.columns(2) + cell4 = l3_col1.container(border=True) + cell5 = l3_col2.container(border=True) + + # Affichage donut + with cell4: + st.plotly_chart(fig1_actions, use_container_width=True) + + # Affichage barplot + with cell5: + st.plotly_chart(fig2_actions, use_container_width=True) + + # Ligne 3 : 2 graphiques en ligne : carte relevés et bar chart matériaux + l3_col1, l3_col2 = st.columns(2) + cell6 = l3_col1.container(border=True) + cell7 = l3_col2.container(border=True) + + # Ligne 4 : 2 graphiques en ligne : bar chart milieux et bar chart types déchets + l4_col1, l4_col2 = st.columns(2) + cell8 = l4_col1.container(border=True) + cell9 = l4_col2.container(border=True) + + # Ligne 5 : 2 graphiques en ligne : line chart volume + nb collectes et Pie niveau de caractérisation + l5_col1, l5_col2 = st.columns(2) + cell10 = l5_col1.container(border=True) + cell11 = l5_col2.container(border=True) + + # onglet Evenements a venir + with tab2: + st.write(f"Votre territoire : Pays - France") + + # Définition d'une fonction pour charger les evenements à venir + @st.cache_data + def load_df_events_clean() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/export_events_cleaned.csv" + ) + + # Appel des fonctions pour charger les données + df_events = load_df_events_clean() + + df_events.DATE = pd.to_datetime(df_events.DATE) + + # Filtrer les événements à venir + df_events_a_venir = df_events[ + df_events.DATE > (datetime.now() - timedelta(days=5)) + ] + + # Trie les events par date + df_events_a_venir.sort_values(by="DATE", inplace=True) + + # Coord approximatives du centre de la France + coord_centre_france = [46.603354, 1.888334] + + # Code couleurs de ZDS + color_ZDS_bleu = "#003463" + color_ZDS_rouge = "#e9003f" + + # Créer la carte 
+ map_events = folium.Map( + location=coord_centre_france, + zoom_start=6, + ) - with st.container(height=500, border=False): + # Ajouter des marqueurs pour chaque événement à venir sur la carte for idx, row in df_events_a_venir.iterrows(): - with st.container(border=True): - # Bloc contenant la date - date_block = f"
{row.DATE.day}
{row.DATE.strftime('%b')}
" - # Bloc contenant le nom de l'événement - event_block = ( - f"
{row.NOM_EVENEMENT}
" - ) - # Bloc contenant le type d'événement et le nom de la structure - type_structure_block = f"{row.TYPE_EVENEMENT} | {row.NOM_STRUCTURE}" - - # Ajout de chaque événement dans la liste - st.write( - f"
{date_block}
{event_block}{type_structure_block}
", - unsafe_allow_html=True, - ) - - # Afficher la carte avec Streamlit - st_folium = st.components.v1.html - st_folium( - folium.Figure().add_child(map_events).render(), - width=800, - height=800, - ) + folium.Marker( + location=[row.COORD_GPS_Y, row.COORD_GPS_X], + popup=folium.Popup(row.NOM_EVENEMENT, lazy=False), + # tooltip=row.NOM_EVENEMENT, + # icon=folium.Icon(icon_color=color_ZDS_bleu) + ).add_to(map_events) + + # Afficher la liste des événements à venir avec la date affichée avant le nom + st.subheader("Actions à venir :") + + with st.container(height=500, border=False): + for idx, row in df_events_a_venir.iterrows(): + with st.container(border=True): + # Bloc contenant la date + date_block = f"
{row.DATE.day}
{row.DATE.strftime('%b')}
" + # Bloc contenant le nom de l'événement + event_block = ( + f"
{row.NOM_EVENEMENT}
" + ) + # Bloc contenant le type d'événement et le nom de la structure + type_structure_block = f"{row.TYPE_EVENEMENT} | {row.NOM_STRUCTURE}" + + # Ajout de chaque événement dans la liste + st.write( + f"
{date_block}
{event_block}{type_structure_block}
", + unsafe_allow_html=True, + ) + + # Afficher la carte avec Streamlit + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_events).render(), + width=800, + height=800, + ) +else: + st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index e75f8b9..2314da2 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -5,3 +5,6 @@ duckdb==0.10.0 streamlit==1.32.2 streamlit-folium==0.19.1 plotly==5.19.0 +streamlit-dynamic-filters==0.1.6 +streamlit-authenticator==0.3.2 +st-pages==0.4.5 From a7f45038735f812ebe4ab9a0da9758185c55afee Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 19 Apr 2024 17:18:36 -0400 Subject: [PATCH 052/147] =?UTF-8?q?[kb]=20=F0=9F=9A=91=20Fix=20authenticat?= =?UTF-8?q?ion?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/register.py | 47 ++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 dashboards/app/pages/register.py diff --git a/dashboards/app/pages/register.py b/dashboards/app/pages/register.py new file mode 100644 index 0000000..be54cb4 --- /dev/null +++ b/dashboards/app/pages/register.py @@ -0,0 +1,47 @@ +from pathlib import Path +import yaml +from yaml.loader import SafeLoader +import streamlit as st +import streamlit_authenticator as stauth + +st.markdown( + """ +# Bienvenue 👋 +#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
+""", +) + +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) + +try: + ( + email_of_registered_user, + username_of_registered_user, + name_of_registered_user, + ) = authenticator.register_user( + pre_authorization=False, + fields={ + "Form name": "S'enregistrer", + "Email": "Email", + "Username": "Identifiant", + "Password": "Mot de passe", + "Repeat password": "Répeter le mot de passe", + "Register": "S'enregistrer", + }, + ) + if email_of_registered_user: + with open(".credentials.yml", "w") as file: + yaml.dump(config, file, default_flow_style=False) + st.success("Utilisateur enregistré") +except Exception as e: + st.error(e) From 04f389ca33988485c1318ce2f7cbbd51e4f9c4af Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 19 Apr 2024 17:19:39 -0400 Subject: [PATCH 053/147] =?UTF-8?q?[kb]=20=F0=9F=9A=91=20Fix=20authenticat?= =?UTF-8?q?ion?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/register.py | 47 ++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 dashboards/app/pages/register.py diff --git a/dashboards/app/pages/register.py b/dashboards/app/pages/register.py new file mode 100644 index 0000000..be54cb4 --- /dev/null +++ b/dashboards/app/pages/register.py @@ -0,0 +1,47 @@ +from pathlib import Path +import yaml +from yaml.loader import SafeLoader +import streamlit as st +import streamlit_authenticator as stauth + +st.markdown( + """ +# Bienvenue 👋 +#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
+""", +) + +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) + +try: + ( + email_of_registered_user, + username_of_registered_user, + name_of_registered_user, + ) = authenticator.register_user( + pre_authorization=False, + fields={ + "Form name": "S'enregistrer", + "Email": "Email", + "Username": "Identifiant", + "Password": "Mot de passe", + "Repeat password": "Répeter le mot de passe", + "Register": "S'enregistrer", + }, + ) + if email_of_registered_user: + with open(".credentials.yml", "w") as file: + yaml.dump(config, file, default_flow_style=False) + st.success("Utilisateur enregistré") +except Exception as e: + st.error(e) From fef1bfe091f49365b26b7fac5f17f9bb3e000080 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 19 Apr 2024 17:28:24 -0400 Subject: [PATCH 054/147] =?UTF-8?q?[kb]=20=F0=9F=90=9B=20Fix=20imports?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/actions.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index 6374cde..c38c02d 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -1,12 +1,12 @@ -import streamlit as st -import altair as alt import pandas as pd -import duckdb +from datetime import datetime, timedelta +import plotly.express as px +import streamlit as st +import folium -st.markdown( - """# 👊 Actions -*Quels sont les actions mises en place par les acteurs ?* -""" +# Page setting : wide layout +st.set_page_config( + layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Actions" ) # Session state From 6a3b5b26417e556b83fa8e18f9a9a6756b1fdeef Mon Sep 17 00:00:00 2001 From: Kyllian 
Beguin <50613619+KyllianBeguin@users.noreply.github.com> Date: Sat, 20 Apr 2024 19:08:13 +0200 Subject: [PATCH 055/147] =?UTF-8?q?[kb]=20=F0=9F=8E=89=20Add=20credentials?= =?UTF-8?q?=20for=20dev?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/.credentials-dev.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 dashboards/.credentials-dev.yml diff --git a/dashboards/.credentials-dev.yml b/dashboards/.credentials-dev.yml new file mode 100644 index 0000000..716cedd --- /dev/null +++ b/dashboards/.credentials-dev.yml @@ -0,0 +1,14 @@ +cookie: + expiry_days: 30 + key: some_signature_key + name: some_cookie_name +credentials: + usernames: + test: + email: test@test.com + logged_in: false + name: test + password: $2b$12$fR4sp7tIG.dbeusbr695MOw/xvN1sf.21rML7t7j9pCdIVREIocUO +pre-authorized: + emails: + - test@test.com From 9d0605562b0e023154bd956453622b3357b37d50 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Sat, 20 Apr 2024 13:10:25 -0400 Subject: [PATCH 056/147] =?UTF-8?q?[kb]=20=F0=9F=9A=9A=20Move=20creds=20to?= =?UTF-8?q?=20app?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/.credentials-dev.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 dashboards/app/.credentials-dev.yml diff --git a/dashboards/app/.credentials-dev.yml b/dashboards/app/.credentials-dev.yml new file mode 100644 index 0000000..716cedd --- /dev/null +++ b/dashboards/app/.credentials-dev.yml @@ -0,0 +1,14 @@ +cookie: + expiry_days: 30 + key: some_signature_key + name: some_cookie_name +credentials: + usernames: + test: + email: test@test.com + logged_in: false + name: test + password: $2b$12$fR4sp7tIG.dbeusbr695MOw/xvN1sf.21rML7t7j9pCdIVREIocUO +pre-authorized: + emails: + - test@test.com From 0c4bbb4278cc7c09a111ff08cb597760a8f455e8 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Sat, 20 Apr 2024 13:15:58 -0400 Subject: 
[PATCH 057/147] =?UTF-8?q?[kb]=20=F0=9F=94=A5=20Remove=20file?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/.credentials-dev.yml | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 dashboards/.credentials-dev.yml diff --git a/dashboards/.credentials-dev.yml b/dashboards/.credentials-dev.yml deleted file mode 100644 index 716cedd..0000000 --- a/dashboards/.credentials-dev.yml +++ /dev/null @@ -1,14 +0,0 @@ -cookie: - expiry_days: 30 - key: some_signature_key - name: some_cookie_name -credentials: - usernames: - test: - email: test@test.com - logged_in: false - name: test - password: $2b$12$fR4sp7tIG.dbeusbr695MOw/xvN1sf.21rML7t7j9pCdIVREIocUO -pre-authorized: - emails: - - test@test.com From f9f1f6fd2d0fb367ce763508e20b0d7e9143693b Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Sat, 20 Apr 2024 13:27:05 -0400 Subject: [PATCH 058/147] =?UTF-8?q?[kb]=20=F0=9F=99=88=20Add=20streamlit?= =?UTF-8?q?=20credentials?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 67bb6de..55b396b 100644 --- a/.gitignore +++ b/.gitignore @@ -163,4 +163,7 @@ cython_debug/ .ruff_cache # Dossier sauvegarde Thibaut -TG_sauv \ No newline at end of file +TG_sauv + +# Streamlit: credentials +dashboards/app/.credentials.yml From a36f44dccc454b4468537b63d018b5ace7c620ae Mon Sep 17 00:00:00 2001 From: Floriane Duccini Date: Fri, 19 Apr 2024 14:05:27 +0200 Subject: [PATCH 059/147] first draft onglet structure --- .gitignore | 2 + dashboards/app/home.py | 110 +++++++++++++++++- dashboards/app/pages/structures.py | 174 +++++++++++++++++++++++------ dashboards/app/requirements.txt | 1 + 4 files changed, 253 insertions(+), 34 deletions(-) diff --git a/.gitignore b/.gitignore index b8fb0eb..624c6e0 100644 --- a/.gitignore +++ b/.gitignore @@ -131,6 +131,8 @@ venv/ ENV/ 
env.bak/ venv.bak/ +.venv* +venv* # Spyder project settings .spyderproject diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 3fd4b7b..d42cf40 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,3 +1,4 @@ +import pandas as pd import streamlit as st st.markdown( @@ -8,4 +9,111 @@ ) st.markdown("""# À propos""") -st.image("media/ZDS-logo.png") + + +# Chargement des données et filtre géographique à l'arrivée sur le dashboard +# Table des volumes par matériaux +@st.cache_data +def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + +# Table des structures +@st.cache_data +def load_structures() -> pd.DataFrame: + return pd.read_excel( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/export_structures_29022024%20(1).xlsx", + index_col=0, + ) + + +# Appel des fonctions pour charger les données + +df_other = load_df_other() +df_structures = load_structures() + + +# Création du filtre par niveau géographique : correspondance labels et variables du dataframe +niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", +} + +# 1ère étape : sélection du niveau administratif concerné (région, dép...) 
+# Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment +# Récupérer les index pour conserver la valeur des filtres au changement de pages +# Filtre niveau administratif +niveau_admin = st.session_state.get("niveau_admin", None) +index_admin = st.session_state.get("index_admin", None) +# Filtre collectivité +collectivite = st.session_state.get("collectivite", None) +index_collec = st.session_state.get("index_collec", None) + +# Initialiser la selectbox avec l'index récupéré +select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=index_admin, +) + +if select_niveauadmin is not None: + # Filtrer la liste des collectivités en fonction du niveau admin + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + # 2ème filtre : sélection de la collectivité concernée + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=index_collec, + ) + + +if st.button("Enregistrer la sélection"): + # Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, + ) + + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + + # Filtrer et enregistrer le DataFrame dans un session state pour la suite + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] + st.session_state["df_other_filtre"] = df_other_filtre + + # Filtrer dataframe structures et enregistrer dans le session.state + st.session_state["structures"] = df_structures + + # Filtrer 
et enregistrer le dataframe nb_dechets dans session.State + # Récuperer la liste des relevés + id_releves = df_other_filtre["ID_RELEVE"].unique() + # Filtrer df_nb_dechets sur la liste des relevés + # st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ + + # Afficher le nombre de relevés disponibles + nb_releves = len(st.session_state["df_other_filtre"]) + st.write( + f"{nb_releves} relevés de collecte sont disponibles \ + pour l'analyse sur votre territoire.", + ) diff --git a/dashboards/app/pages/structures.py b/dashboards/app/pages/structures.py index b5ae554..3e160ae 100644 --- a/dashboards/app/pages/structures.py +++ b/dashboards/app/pages/structures.py @@ -2,6 +2,13 @@ import altair as alt import duckdb import pandas as pd +import plotly.express as px + + +# Configuration de la page +st.set_page_config( + layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Structures" +) st.markdown( """# 🔭 Structures @@ -9,41 +16,142 @@ """ ) -df_nb_dechet = pd.read_csv( - ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" + +# Récupérer les filtres géographiques s'ils ont été fixés +filtre_niveau = st.session_state.get("niveau_admin", "") +filtre_collectivite = st.session_state.get("collectivite", "") + + +# Appeler les dataframes filtrés depuis le session state +if "structures" not in st.session_state: + st.write( + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home pour afficher les données. 
:warning: + """ ) -) + st.stop() +else: + df_structures = st.session_state["structures"] + +# # df_nb_dechet = pd.read_csv( +# # ( +# # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" +# # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" +# # "sation/data/data_releve_nb_dechet.csv" +# # ) +# # ) + +# # df_other = pd.read_csv( +# # ( +# # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" +# # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" +# # "sation/data/data_zds_enriched.csv" +# # ) +# # ) + +# # res_aggCategory_filGroup = duckdb.query( +# # ( +# # "SELECT categorie, sum(nb_dechet) AS total_dechet " +# # "FROM df_nb_dechet " +# # "WHERE type_regroupement = 'GROUPE' " +# # "GROUP BY categorie " +# # "HAVING sum(nb_dechet) > 10000 " +# # "ORDER BY total_dechet DESC;" +# # ) +# # ).to_df() + +# # st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") + +# st.altair_chart( +# alt.Chart(res_aggCategory_filGroup) +# .mark_bar() +# .encode( +# x=alt.X("categorie", sort=None, title=""), +# y=alt.Y("total_dechet", title="Total de déchet"), +# ), +# use_container_width=True, +# ) + +if filtre_niveau == "" and filtre_collectivite == "": + st.write("Aucune sélection de territoire n'a été effectuée") +else: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") + + +# Ligne 1 : 2 cellules avec les indicateurs clés en haut de page +l1_col1, l1_col2 = st.columns(2) + +# Pour avoir une bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) -df_other = pd.read_csv( - ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" +# 1ère métrique : nombre d'acteurs +cell1 = l1_col1.container(border=True) +nb_acteurs = len(df_structures) +# Trick pour séparer les milliers +cell1.metric("Acteurs présents sur le territoire", 
nb_acteurs) + +# 2ème métrique : nb de spots adoptés +cell2 = l1_col2.container(border=True) +nb_spots_adoptes = df_structures["A1S_NB_SPOTS_ADOPTES"].sum() +cell2.metric("Spots adoptés", nb_spots_adoptes) + + +# Ligne 2 : 2 graphiques en ligne : carte et pie chart type de structures + +# l2_col1, l2_col2 = st.columns(2) +# cell4 = l2_col1.container(border=True) +# cell5 = l2_col2.container(border=True) + +with st.container(): + + df_aggType = duckdb.query( + ( + "SELECT TYPE, count(TYPE) AS nb_structures " + "FROM df_structures " + "GROUP BY TYPE " + "ORDER BY nb_structures DESC;" + ) + ).to_df() + + # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance + fig = px.pie( + df_aggType, + values="nb_structures", + names="TYPE", + title="Répartition des types de structures", + hole=0.4, + color="TYPE", ) -) -res_aggCategory_filGroup = duckdb.query( - ( - "SELECT categorie, sum(nb_dechet) AS total_dechet " - "FROM df_nb_dechet " - "WHERE type_regroupement = 'GROUPE' " - "GROUP BY categorie " - "HAVING sum(nb_dechet) > 10000 " - "ORDER BY total_dechet DESC;" + # Amélioration de l'affichage + # Change the percent format to round to integer + fig.update_traces( + textinfo="percent", + texttemplate="%{percent:.0%}", + textfont_size=16, ) -).to_df() - -# st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") - -st.altair_chart( - alt.Chart(res_aggCategory_filGroup) - .mark_bar() - .encode( - x=alt.X("categorie", sort=None, title=""), - y=alt.Y("total_dechet", title="Total de déchet"), - ), - use_container_width=True, -) + fig.update_layout( + autosize=True, + legend_title_text="Type de structure", + ) + + # Affichage du graphique + st.plotly_chart(fig, use_container_width=True) + +# Cartographie des structures +with st.container(): + st.markdown(""" **Cartographie des structures du territoire**""") + + +# Affichage du dataframe +with st.container(): + st.markdown(""" **Structures du territoire**""") + 
df_struct_simplifie = duckdb.query( + ( + "SELECT NOM as Nom, TYPE, ACTION_RAMASSAGE AS 'Nombre de collectes', A1S_NB_SPOTS_ADOPTES as 'Nombre de spots adoptés' " + "FROM df_structures " + "ORDER BY Nom DESC;" + ) + ).to_df() + + st.dataframe(df_struct_simplifie, hide_index=True) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index e75f8b9..134d136 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -5,3 +5,4 @@ duckdb==0.10.0 streamlit==1.32.2 streamlit-folium==0.19.1 plotly==5.19.0 +openpyxl==3.1.2 \ No newline at end of file From 463ee7639463d2e807a47becd17668d4925bde8e Mon Sep 17 00:00:00 2001 From: linh dinh Date: Sun, 21 Apr 2024 19:17:12 +0200 Subject: [PATCH 060/147] =?UTF-8?q?Ajoute=20des=20graphs=20et=20les=20r?= =?UTF-8?q?=C3=A9organiser=20en=20diff=C3=A9rents=20tabs?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/hotspots.py | 588 +++++++++++++++++++++---------- 1 file changed, 397 insertions(+), 191 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index fbe23f5..6319f98 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -11,6 +11,8 @@ import requests import plotly.express as px +# import for line chart +import plotly.graph_objects as go # To show folium maps on streamlit import folium @@ -23,12 +25,7 @@ st.set_page_config( page_title="Hotspots", layout="wide", - initial_sidebar_state="expanded", - menu_items={ - 'Get Help': 'https://www.extremelycoolapp.com/help', - 'Report a bug': "https://www.extremelycoolapp.com/bug", - 'About': "# This is a header. This is an *extremely* cool app!" 
- } + initial_sidebar_state="expanded" ) ###################################### @@ -84,6 +81,23 @@ } ] +# Params for the density graph filters +DENSITY_FILTERS_PARAMS = [ + { + "filter_col": "REGION", + "filter_message": "Sélectionnez une région (par défaut votre région) :" + }, + { + "filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :" + }, + { + "filter_col": "TYPE_LIEU2", "filter_message": "Sélectionnez un lieu :" + }, + { + "filter_col":"ANNEE", "filter_message": "Sélectionnez une année :" + } +] + ########################################################################### # 0 bis/ Fonctions utilitaires : peuvent être utilisées par tout le monde # ########################################################################### @@ -102,12 +116,20 @@ def construct_query_string(bound_word=" and ", **params) -> str: for param_key, param in params.items(): # Construct the param sub string if the param is not 'None' if param is not None: + # Check if the parameter value is of type int if isinstance(param, int): # If it's an integer, use integer comparison query_sub_string = f'{param_key} == {param}' + + # Check if the parameter value is a list. + elif isinstance(param, list): + # Handle list of values for multiselect queries. + param_values = ', '.join([f'"{value}"' for value in param]) # Prepare string of values enclosed in quotes. + query_sub_string = f'{param_key} in [{param_values}]' # Use 'in' operator for lists. 
+ else: - # If it's not an integer, treat it as a string + # Create a query sub-string for other data types query_sub_string = f'{param_key} == "{param}"' # Add to the query string @@ -117,8 +139,10 @@ def construct_query_string(bound_word=" and ", **params) -> str: return query_string.strip(bound_word) -def scalable_filters( - data_zds: pd.DataFrame, filters_params=ADOPTED_SPOTS_FILTERS_PARAMS +def scalable_filters_single_select( + data_zds: pd.DataFrame, + filters_params=ADOPTED_SPOTS_FILTERS_PARAMS, + base_key="default_key" ) -> dict: """Create streamlit select box filters as specified by the filters_params list. Create the filter dict used to filter the hotspots maps accordingly.""" @@ -134,11 +158,14 @@ def scalable_filters( # Set the filter column and the filter message column, message = filter_params["filter_col"], filter_params["filter_message"] - # Sort the unique values of the column in ascending order + # Sort the unique values of the column in ascending order sorted_values = sorted(data_zds[column].unique(), reverse=True) - # Create the streamlit select box with sorted values - s = columns[i].selectbox(message, sorted_values) + # Create unique values and sort them in descending order + unique_key = f"{base_key}_{column}_{i}" + + # Create the Streamlit select box with sorted values and a unique key + s = columns[i].selectbox(message, sorted_values, key=unique_key) # Show the select box on screen columns[i].write(s) @@ -148,6 +175,40 @@ def scalable_filters( return filter_dict +def scalable_filters_multi_select( + data_zds: pd.DataFrame, + filters_params=DENSITY_FILTERS_PARAMS, + base_key="default_key" +) -> dict: + """Create streamlit select box filters as specified by the filters_params list. 
+ Create the filter dict used to filter the hotspots maps accordingly.""" + + # Instanciate the empty filter dict + filter_dict = dict() + + # Create as many columns as the lenght of the filters_params list + columns = st.columns(len(filters_params)) + + # Iterate over filters_params + for i, filter_params in enumerate(filters_params): + # Set the filter column and the filter message + column, message = filter_params["filter_col"], filter_params["filter_message"] + + # Get unique values, convert to string and sort them + sorted_values = sorted(data_zds[column].dropna().astype(str).unique(), reverse=True) + + # Generate a unique key for each multiselect widget + unique_key = f"{base_key}_{column}_{i}" + + # Create the Streamlit multiselect with sorted values and a unique key + selected_values = columns[i].multiselect(message, sorted_values, + default=sorted_values[0] if sorted_values else [], + key=unique_key) + + # Fill the filter dict with the selected values + filter_dict[column] = selected_values + + return filter_dict ################## # 1/ Import data # @@ -166,12 +227,12 @@ def scalable_filters( #spot = pd.read_excel(DATA_SPOT) # correction : corrected data for density map -#correction = pd.read_excel(CORRECTION) +correction = pd.read_excel(CORRECTION) # Fusion and correction -#data_correct = pd.merge(data_zds, correction, on='ID_RELEVE', how='left') -#data_correct = data_correct[data_correct['SURFACE_OK'] == 'OUI'] -#data_zds = data_correct[data_correct['VOLUME_TOTAL'] > 0] +data_correct = pd.merge(data_zds, correction, on='ID_RELEVE', how='left') +data_correct = data_correct[data_correct['SURFACE_OK'] == 'OUI'] +data_zds = data_correct[data_correct['VOLUME_TOTAL'] > 0] ################## # 2/ Hotspot tab # @@ -180,159 +241,41 @@ def scalable_filters( # Tab title st.markdown("""# 🔥 Hotspots : **Quelles sont les zones les plus impactées ?**""") -################################ -# 2.1/ Carte des spots adoptés # -################################ - -# Create 
the filter dict for the adopted spots map and the streamlit filter boxes -filter_dict = scalable_filters(data_zds) - -# Create the map of the adopted spots -def plot_adopted_waste_spots( - data_zds: pd.DataFrame, - filter_dict: dict, - region_geojson_path: str, -) -> folium.Map: - """Show a folium innteractive map of adopted spots within a selected region, - filtered by environments of deposit. - Arguments: - - data_zds: The waste dataframe - - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by - """ - print("Filter Dictionary:", filter_dict) # Check the filter dictionary - - # 1/ Create the waste geodataframe # - # Create a GeoDataFrame for waste points - gdf = gpd.GeoDataFrame( - data_zds, - geometry=gpd.points_from_xy( - data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"] - ), - crs="EPSG:4326", - ) - - # Convert ANNEE values to integers - if "ANNEE" in filter_dict: - filter_dict["ANNEE"] = int(filter_dict["ANNEE"]) - - # Construct the query string - query_string = construct_query_string(**filter_dict) - print("Query String:", query_string) # Check the constructed query string - - # Filter the geodataframe by region and by environment - gdf_filtered = gdf.query(query_string) - - # 2/ Create the regions geodataframe # - # Unpack the region name - region = filter_dict["REGION"] - - # Load France regions from a GeoJSON file - regions = gpd.read_file(region_geojson_path) - regions = regions.loc[regions["nom"] == region, :] - - # Filter the region geodataframe for the specified region - selected_region = regions[regions["nom"].str.lower() == region.lower()] - if selected_region.empty: - raise KeyError(f"Region '{region}' not found.") - - # 3/ Initialize folium map # - # Initialize a folium map, centered around the mean location of the waste points - map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] - - # Catch ValueError if the filtered geodataframe contain no rows - try: - m = 
folium.Map( - location=map_center, zoom_start=5 - ) # Adjust zoom_start as needed for the best initial view - - # Return None if ValueError - except ValueError as e: - st.markdown( - "Il n'y a pas de hotspots pour les valeurs de filtres selectionnés !" - ) - return - - # 4/ Add the markers # - # Use MarkerCluster to manage markers if dealing with a large number of points - marker_cluster = MarkerCluster().add_to(m) - - # Add each waste point as a marker on the folium map - for _, row in gdf_filtered.iterrows(): - # Define the marker color: green for adopted spots, red for others - marker_color = "darkgreen" if row["SPOT_A1S"] else "red" - # Define the icon: check-circle for adopted, info-sign for others - icon_type = "check-circle" if row["SPOT_A1S"] else "info-sign" - - folium.Marker( - location=[row.geometry.y, row.geometry.x], - popup=f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", - icon=folium.Icon(color=marker_color, icon=icon_type, prefix="fa"), - ).add_to(marker_cluster) - - # 5/ Add the region boundary # - # Add the region boundary to the map for context - folium.GeoJson( - selected_region, - name="Region Boundary", - style_function=lambda feature: { - "weight": 2, - "fillOpacity": 0.1, - }, - ).add_to(m) - - return m - +######################################################## +# 2.1/ Carte densité de déchets sur les zones étudiées # +######################################################## +# à faire! #################################################################################### -# 2.1/ Tableaux de la densité par milieu et lieu de déchets sur les zones étudiées # +# 2.2/ Tableaux de la densité par milieu et lieu de déchets sur les zones étudiées # #################################################################################### -def data_density_lieu_preparation(data): - - # Calculate waste volume sum for each 'LIEU' - volume_total_lieu = data.groupby('TYPE_LIEU2')['VOLUME_TOTAL'].sum().reset_index() - - # Remove duplicate data and calculate SURFACE total - data_unique = data.drop_duplicates(subset=['LIEU_COORD_GPS']) - surface_total_lieu = data_unique.groupby('TYPE_LIEU2')['SURFACE'].sum().reset_index() - - # Merge volume and surface data for 'LIEU', calculate density, and sort - data_lieu = pd.merge(volume_total_lieu, surface_total_lieu, on='TYPE_LIEU2') - data_lieu['DENSITE_LIEU'] = (data_lieu['VOLUME_TOTAL'] / data_lieu['SURFACE']).round(5) - data_lieu_sorted = data_lieu.sort_values(by="DENSITE_LIEU", ascending=False) - - return data_lieu_sorted - -def data_density_milieu_preparation(data): - # Calculate waste volume sum for each 'MILIEU' - volume_total_milieu = data.groupby('TYPE_MILIEU')['VOLUME_TOTAL'].sum().reset_index() - - # Remove duplicate data and calculate SURFACE total - data_unique = data.drop_duplicates(subset=['LIEU_COORD_GPS']) - surface_total_milieu = 
data_unique.groupby('TYPE_MILIEU')['SURFACE'].sum().reset_index() - # Merge volume and surface data for 'MILIEU', calculate density, and sort - data_milieu = pd.merge(volume_total_milieu, surface_total_milieu, on='TYPE_MILIEU') - data_milieu['DENSITE_MILIEU'] = (data_milieu['VOLUME_TOTAL'] / data_milieu['SURFACE']).round(5) - data_milieu_sorted = data_milieu.sort_values(by="DENSITE_MILIEU", ascending=False) - - return data_milieu_sorted - -def density_lieu(data_zds: pd.DataFrame, filter_dict: dict): +def density_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): """ Calculate and display the density of waste by type of location ('LIEU') for a selected region. """ + # Get the selected region from filter_dict - selected_region = filter_dict.get("REGION", None) + selected_regions = multi_filter_dict.get("REGION", []) - if selected_region is not None: + if selected_regions is not None: # Filter data for selected region - data_selected_region = data_zds[data_zds['LIEU_REGION'] == selected_region] + data_selected_region = data_zds[data_zds['LIEU_REGION'].isin(selected_regions)] - # Apply data preparation function - data_lieu_sorted = data_density_lieu_preparation(data_selected_region) + # Calculate waste volume sum for each 'LIEU' + volume_total_lieu = data_selected_region.groupby('TYPE_LIEU2')['VOLUME_TOTAL'].sum().reset_index() + + # Remove duplicate data and calculate SURFACE total + data_unique = data_selected_region.drop_duplicates(subset=['LIEU_COORD_GPS']) + surface_total_lieu = data_unique.groupby('TYPE_LIEU2')['SURFACE'].sum().reset_index() + + # Merge volume and surface data for 'LIEU', calculate density, and sort + data_lieu = pd.merge(volume_total_lieu, surface_total_lieu, on='TYPE_LIEU2') + data_lieu['DENSITE_LIEU'] = (data_lieu['VOLUME_TOTAL'] / data_lieu['SURFACE']).round(5) + data_lieu_sorted = data_lieu.sort_values(by="DENSITE_LIEU", ascending=False) # Display sorted DataFrame with specific configuration for 'data_lieu_sorted' lieu = 
st.markdown('##### Densité des déchets par type de lieu (L/m2)') @@ -355,20 +298,28 @@ def density_lieu(data_zds: pd.DataFrame, filter_dict: dict): return lieu - -def density_milieu(data_zds: pd.DataFrame, filter_dict: dict): +def density_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): """ Calculate and display the density of waste by type of location ('MILIEU') for a selected region. """ # Get the selected region from filter_dict - selected_region = filter_dict.get("REGION", None) + selected_regions = multi_filter_dict.get("REGION", []) - if selected_region is not None: + if selected_regions is not None: # Filter data for selected region - data_selected_region = data_zds[data_zds['LIEU_REGION'] == selected_region] + data_selected_region = data_zds[data_zds['LIEU_REGION'].isin(selected_regions)] + + # Calculate waste volume sum for each 'MILIEU' + volume_total_milieu = data_selected_region.groupby('TYPE_MILIEU')['VOLUME_TOTAL'].sum().reset_index() - # Apply data preparation function - data_milieu_sorted = data_density_milieu_preparation(data_selected_region) + # Remove duplicate data and calculate SURFACE total + data_unique = data_selected_region.drop_duplicates(subset=['LIEU_COORD_GPS']) + surface_total_milieu = data_unique.groupby('TYPE_MILIEU')['SURFACE'].sum().reset_index() + + # Merge volume and surface data for 'MILIEU', calculate density, and sort + data_milieu = pd.merge(volume_total_milieu, surface_total_milieu, on='TYPE_MILIEU') + data_milieu['DENSITE_MILIEU'] = (data_milieu['VOLUME_TOTAL'] / data_milieu['SURFACE']).round(5) + data_milieu_sorted = data_milieu.sort_values(by="DENSITE_MILIEU", ascending=False) # Display sorted DataFrame with specific configuration for 'data_milieu_sorted' milieu = st.markdown('##### Densité des déchets par type de milieu (L/m2)') @@ -391,13 +342,6 @@ def density_milieu(data_zds: pd.DataFrame, filter_dict: dict): return milieu -######################################################## -# 2.2/ Carte densité de 
déchets sur les zones étudiées # -######################################################## - - - - ###################################################### # 2.3/ Carte choropleth densité de déchets en France # ###################################################### @@ -406,15 +350,26 @@ def make_density_choropleth(data_zds, region_geojson_path): # Load all regions from the GeoJSON file regions_geojson = requests.get(region_geojson_path).json() + # Extract region names from GeoJSON for later comparison + regions_from_geojson = [feature['properties']['nom'] for feature in regions_geojson['features']] + + # Create a DataFrame from the GeoJSON region names + regions_df = pd.DataFrame(regions_from_geojson, columns=['nom']) + # Data preparation # Calculate the total VOLUME_TOTAL for each region without removing duplicate data volume_total_sums = data_zds.groupby('LIEU_REGION')['VOLUME_TOTAL'].sum().reset_index() # Merge the waste data and the geographical data - volume_total_sums = pd.merge(regions, volume_total_sums, left_on='nom', right_on='LIEU_REGION', how='left') + volume_total_sums = pd.merge(regions_df, volume_total_sums, left_on='nom', right_on='LIEU_REGION', how='left') + + # Identify regions with no available data + regions_no_data = volume_total_sums[volume_total_sums['VOLUME_TOTAL'].isna()]['nom'].tolist() + if regions_no_data: + st.info(f"Aucune donnée disponible pour les régions suivantes : {', '.join(regions_no_data)}", icon="⚠️") - # Remove rows containing NaN - volume_total_sums = volume_total_sums.dropna() + # Drop rows containing NaN to avoid errors in the choropleth + volume_total_sums.dropna(inplace=True) # Remove duplicate data and calculate SURFACE total data_unique = data_zds.drop_duplicates(subset=['LIEU_COORD_GPS']) @@ -449,37 +404,288 @@ def make_density_choropleth(data_zds, region_geojson_path): margin=dict(l=0, r=0, t=0, b=0) ) - return choropleth + +############################################################ +# 2.1/ Line chart de l'évolution 
de la densité des déchets # +# par lieu et par milieu au fil des années spots adoptés # +############################################################ + +def line_chart_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): + # Get the selected region and milieu from the filter dictionary + selected_regions = multi_filter_dict.get("REGION", []) + selected_lieu = multi_filter_dict.get("TYPE_LIEU2", []) + + # Ensure that at least one region is selected + if not selected_regions: + st.error("Aucune région sélectionnée. Veuillez préciser une région.") + return + + # Filter data for the selected region + data_selected_region = data_zds[data_zds['LIEU_REGION'].isin(selected_regions)] + if data_selected_region.empty: + st.warning(f"Aucune donnée disponible pour la région sélectionnée : {selected_regions}") + return + + # Further filter data for the selected milieus + data_selected_lieu = data_selected_region[data_selected_region['TYPE_LIEU2'].isin(selected_lieu)] if selected_lieu else data_selected_region + + # Check if there is any data left after filtering by milieu + if data_selected_lieu.empty: + st.warning("Aucune donnée disponible pour le lieu sélectionné.") + return + + # Calculate waste volume sum for each 'LIEU' by 'ANNEE' + volume_total_annee = data_selected_lieu.groupby(['TYPE_LIEU2', 'ANNEE'])['VOLUME_TOTAL'].sum().reset_index() + + # Remove duplicate data and calculate SURFACE total + data_unique = data_selected_lieu.drop_duplicates(subset=['LIEU_COORD_GPS']) + surface_total_annee = data_unique.groupby(['TYPE_LIEU2', 'ANNEE'])['SURFACE'].sum().reset_index() + + # Merge volume and surface data for 'MILIEU', calculate density, and sort + data_lieu = pd.merge(volume_total_annee, surface_total_annee, on=['TYPE_LIEU2', 'ANNEE']) + if data_lieu.empty: + st.warning("Aucune donnée superposée pour les calculs de volume et de surface pour les lieux sélectionnés.") + return + + data_lieu['DENSITE_LIEU'] = (data_lieu['VOLUME_TOTAL'] / data_lieu['SURFACE']).round(5) + 
data_lieu_sorted = data_lieu.sort_values(by='ANNEE', ascending=False) + + # Create the plot + fig = go.Figure() + for type_lieu in data_lieu_sorted['TYPE_LIEU2'].unique(): + df_plot = data_lieu_sorted[data_lieu_sorted['TYPE_LIEU2'] == type_lieu] + fig.add_trace(go.Scatter(x=df_plot["ANNEE"], y=df_plot['DENSITE_LIEU'], mode='lines+markers', name=type_lieu)) + + # Update plot layout + fig.update_layout( + title=f"Densité des déchets par type de lieu", + xaxis_title="Année", + yaxis_title="Densité L/m2", + legend_title="Type de lieu" + ) + + st.plotly_chart(fig) + +def line_chart_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): + # Get the selected region and milieu from the filter dictionary + selected_regions = multi_filter_dict.get("REGION", []) + selected_milieu = multi_filter_dict.get("TYPE_MILIEU", []) + + # Ensure that at least one region is selected + if not selected_regions: + st.error("Aucune région sélectionnée. Veuillez préciser une région.") + return + + # Filter data for the selected region + data_selected_region = data_zds[data_zds['LIEU_REGION'].isin(selected_regions)] + if data_selected_region.empty: + st.warning(f"Aucune donnée disponible pour la région sélectionnée : {selected_regions}") + return + + # Further filter data for the selected milieus + data_selected_milieu = data_selected_region[data_selected_region['TYPE_MILIEU'].isin(selected_milieu)] if selected_milieu else data_selected_region + + # Check if there is any data left after filtering by milieu + if data_selected_milieu.empty: + st.warning("Aucune donnée disponible pour le milieu sélectionné.") + return + + # Calculate waste volume sum for each 'MILIEU' by 'ANNEE' + volume_total_annee = data_selected_milieu.groupby(['TYPE_MILIEU', 'ANNEE'])['VOLUME_TOTAL'].sum().reset_index() + + # Remove duplicate data and calculate SURFACE total + data_unique = data_selected_milieu.drop_duplicates(subset=['LIEU_COORD_GPS']) + surface_total_annee = data_unique.groupby(['TYPE_MILIEU', 
'ANNEE'])['SURFACE'].sum().reset_index() + + # Merge volume and surface data for 'MILIEU', calculate density, and sort + data_milieu = pd.merge(volume_total_annee, surface_total_annee, on=['TYPE_MILIEU', 'ANNEE']) + if data_milieu.empty: + st.warning("Aucune donnée superposée pour les calculs de volume et de surface pour les milieux sélectionnés.") + return + + data_milieu['DENSITE_MILIEU'] = (data_milieu['VOLUME_TOTAL'] / data_milieu['SURFACE']).round(5) + data_milieu_sorted = data_milieu.sort_values(by='ANNEE', ascending=False) + + # Create the plot + fig = go.Figure() + for type_milieu in data_milieu_sorted['TYPE_MILIEU'].unique(): + df_plot = data_milieu_sorted[data_milieu_sorted['TYPE_MILIEU'] == type_milieu] + fig.add_trace(go.Scatter(x=df_plot["ANNEE"], y=df_plot['DENSITE_MILIEU'], mode='lines+markers', name=type_milieu)) + + # Update plot layout + fig.update_layout( + title=f"Densité des déchets par type de milieu", + xaxis_title="Année", + yaxis_title="Densité L/m2", + legend_title="Type de milieu" + ) + + st.plotly_chart(fig) + + + +################################ +# 2.1/ Carte des spots adoptés # +################################ + +# Create the map of the adopted spots +def plot_adopted_waste_spots( + data_zds: pd.DataFrame, + single_filter_dict: dict, + region_geojson_path: str, +) -> folium.Map: + """Show a folium innteractive map of adopted spots within a selected region, + filtered by environments of deposit. 
+ Arguments: + - data_zds: The waste dataframe + - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by + """ + print("Filter Dictionary:", single_filter_dict) # Check the filter dictionary + + # 1/ Create the waste geodataframe # + # Create a GeoDataFrame for waste points + gdf = gpd.GeoDataFrame( + data_zds, + geometry=gpd.points_from_xy( + data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"] + ), + crs="EPSG:4326", + ) + + # Convert ANNEE values to integers + if "ANNEE" in single_filter_dict: + single_filter_dict["ANNEE"] = int(single_filter_dict["ANNEE"]) + + # Construct the query string + query_string = construct_query_string(**single_filter_dict) + print("Query String:", query_string) # Check the constructed query string + + # Filter the geodataframe by region and by environment + gdf_filtered = gdf.query(query_string) + + # 2/ Create the regions geodataframe # + # Unpack the region name + region = single_filter_dict["REGION"] + + # Load France regions from a GeoJSON file + regions = gpd.read_file(region_geojson_path) + regions = regions.loc[regions["nom"] == region, :] + + # Filter the region geodataframe for the specified region + selected_region = regions[regions["nom"].str.lower() == region.lower()] + if selected_region.empty: + raise KeyError(f"Region '{region}' not found.") + + # 3/ Initialize folium map # + # Initialize a folium map, centered around the mean location of the waste points + map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] + + # Catch ValueError if the filtered geodataframe contain no rows + try: + m = folium.Map( + location=map_center, zoom_start=5 + ) # Adjust zoom_start as needed for the best initial view + + # Return None if ValueError + except ValueError as e: + st.markdown( + "Il n'y a pas de hotspots pour les valeurs de filtres selectionnés !" 
+ ) + return + + # 4/ Add the markers # + # Use MarkerCluster to manage markers if dealing with a large number of points + marker_cluster = MarkerCluster().add_to(m) + + # Add each waste point as a marker on the folium map + for _, row in gdf_filtered.iterrows(): + # Define the marker color: green for adopted spots, red for others + marker_color = "darkgreen" if row["SPOT_A1S"] else "red" + # Define the icon: check-circle for adopted, info-sign for others + icon_type = "check-circle" if row["SPOT_A1S"] else "info-sign" + + folium.Marker( + location=[row.geometry.y, row.geometry.x], + popup=f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", + icon=folium.Icon(color=marker_color, icon=icon_type, prefix="fa"), + ).add_to(marker_cluster) + + # 5/ Add the region boundary # + # Add the region boundary to the map for context + folium.GeoJson( + selected_region, + name="Region Boundary", + style_function=lambda feature: { + "weight": 2, + "fillOpacity": 0.1, + }, + ).add_to(m) + + return m + + + ######################## # Dashboard Main Panel # ######################## -st.markdown('### Spots Adoptés') -m = plot_adopted_waste_spots(data_zds, filter_dict, REGION_GEOJSON_PATH) -# Show the adopted spots map on the streamlit tab -if m: - folium_static(m) +tab1, tab2, tab3, tab4 = st.tabs(["Densité des déchets dans zone étudié", + "Évolution de la densité au fil du temps", + "Spots Adoptés", + "Aperçu à travers la France"]) + +with tab1: -col = st.columns((4, 4, 2), gap='medium') + # Select only the filters for 'REGION' and 'ANNEE' + selected_filters_1 = [f for f in DENSITY_FILTERS_PARAMS if f["filter_col"] in ["REGION", "ANNEE"]] -# Construct the map -with col[0]: + # Use the selected filters for multi-select + multi_filter_dict_1 = scalable_filters_multi_select(data_zds, selected_filters_1, tab1) - density_lieu(data_zds, filter_dict) + col = st.columns((4, 4, 2), gap='medium') -with col[1]: + # Construct the map + with col[0]: + density_lieu(data_zds, multi_filter_dict_1) - density_milieu(data_zds, filter_dict) + with col[1]: + density_milieu(data_zds, multi_filter_dict_1) -with col[2]: - with st.expander('Notice ℹ️', expanded=True): - st.write(''' - Explication des diffférences entre Lieu et Milieu + with col[2]: + with st.expander('Notice ℹ️', expanded=True): + st.write(''' + Explication des diffférences entre Lieu et Milieu ''') -st.markdown('### Densité des déchets en France') -choropleth = make_density_choropleth(data_zds, REGION_GEOJSON_PATH) -st.plotly_chart(choropleth, use_container_width=True) +with tab2: + # Select only the filters for 'REGION' and 'ANNEE' + 
selected_filters_2 = [f for f in DENSITY_FILTERS_PARAMS if f["filter_col"] in ["REGION", "TYPE_LIEU2", "TYPE_MILIEU"]] + + # Use the selected filters for multi-select + multi_filter_dict_2 = scalable_filters_multi_select(data_zds, selected_filters_2, tab2) + + col = st.columns((7, 7), gap='medium') + + with col[0]: + line_chart_lieu(data_zds, multi_filter_dict_2) + + with col[1]: + line_chart_milieu(data_zds, multi_filter_dict_2) + +with tab3: + # Use the selected filters + single_filter_dict_3 = scalable_filters_single_select(data_zds, ADOPTED_SPOTS_FILTERS_PARAMS, tab3) + + st.markdown('### Spots Adoptés') + m = plot_adopted_waste_spots(data_zds, single_filter_dict_3, REGION_GEOJSON_PATH) + # Show the adopted spots map on the streamlit tab + if m: + folium_static(m) + +with tab4: + st.markdown('### Densité des déchets en France') + choropleth = make_density_choropleth(data_zds, REGION_GEOJSON_PATH) + st.plotly_chart(choropleth, use_container_width=True) From 123f3d1d57a943034c341bbea95ae0e633fec2e4 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Sun, 21 Apr 2024 14:52:28 -0400 Subject: [PATCH 061/147] =?UTF-8?q?[kb]=20=F0=9F=93=8C=20Add=20openpyxl?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index e75f8b9..731ea30 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -5,3 +5,4 @@ duckdb==0.10.0 streamlit==1.32.2 streamlit-folium==0.19.1 plotly==5.19.0 +openpyxl==3.1.2 From e8d8a57c92e8add9a0e6c31d6185fd6223f44708 Mon Sep 17 00:00:00 2001 From: DridrM Date: Sun, 21 Apr 2024 22:14:04 +0200 Subject: [PATCH 062/147] Update requirements.txt --- dashboards/app/home.py | 187 ++++++++++++- dashboards/app/pages/hotspots.py | 460 ++++++++++++++++++++----------- dashboards/app/requirements.txt | 3 + 3 files changed, 483 insertions(+), 167 
deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 3fd4b7b..00c5d07 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,11 +1,184 @@ +from pathlib import Path + +import pandas as pd import streamlit as st +import streamlit_authenticator as stauth +import yaml +from st_pages import Page, show_pages +from yaml.loader import SafeLoader + +# Configuration de la page +st.set_page_config( + layout="wide", + page_title="Dashboard Zéro Déchet Sauvage", + page_icon=":dolphin:", + menu_items={ + "About": "https://www.zero-dechet-sauvage.org/", + }, +) + +# load and apply CSS styles +def load_css(file_name: str) -> None: + with Path(file_name).open() as f: + st.markdown(f"", unsafe_allow_html=True) -st.markdown( - """ -# Bienvenue 👋 -#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! -""", + +# Login +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) +authenticator.login( + fields={ + "Form name": "Connexion", + "Username": "Identifiant", + "Password": "Mot de passe", + "Login": "Connexion", + }, ) -st.markdown("""# À propos""") -st.image("media/ZDS-logo.png") + +if st.session_state["authentication_status"]: + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", "Hotspots", "🔥"), + Page("pages/structures.py", "Structures", "🔭"), + ], + ) + + # Load and apply the CSS file at the start of your app + # local debug + load_css("style.css") + + st.markdown( + """ + # Bienvenue 👋 + #### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
+ """, + ) + + st.markdown("""# À propos""") + + # Chargement des données et filtre géographique à l'arrivée sur le dashboard + # Table des volumes par matériaux + @st.cache_data + def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + # Table du nb de déchets + @st.cache_data + def load_df_nb_dechet() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv", + ) + + # Appel des fonctions pour charger les données + + df_other = load_df_other() + df_nb_dechets = load_df_nb_dechet() + + # Création du filtre par niveau géographique : correspondance labels et variables du df + niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", + } + + # 1ère étape : sélection du niveau administratif concerné (région, dép...) 
+ # Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment + # Récupérer les index pour conserver la valeur des filtres au changement de pages + # Filtre niveau administratif + niveau_admin = st.session_state.get("niveau_admin", None) + index_admin = st.session_state.get("index_admin", None) + # Filtre collectivité + collectivite = st.session_state.get("collectivite", None) + index_collec = st.session_state.get("index_collec", None) + + # Initialiser la selectbox avec l'index récupéré + select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=index_admin, + ) + + if select_niveauadmin is not None: + # Filtrer la liste des collectivités en fonction du niveau admin + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + # 2ème filtre : sélection de la collectivité concernée + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=index_collec, + ) + + if st.button("Enregistrer la sélection"): + # Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, + ) + + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + + # Filtrer et enregistrer le DataFrame dans un session state pour la suite + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] + st.session_state["df_other_filtre"] = df_other_filtre + + # Filtrer et enregistrer le dataframe nb_dechets dans session.State + # Récuperer la liste des relevés + id_releves = 
df_other_filtre["ID_RELEVE"].unique() + # Filtrer df_nb_dechets sur la liste des relevés + st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ + df_nb_dechets["ID_RELEVE"].isin(id_releves) + ] + + # Afficher le nombre de relevés disponibles + nb_releves = len(st.session_state["df_other_filtre"]) + st.write( + f"{nb_releves} relevés de collecte sont disponibles \ + pour l'analyse sur votre territoire.", + ) + + authenticator.logout() +elif st.session_state["authentication_status"] is False: + st.error("Mauvais identifiants ou mot de passe.") +elif st.session_state["authentication_status"] is None: + st.warning("Veuillez entrer votre identifiant et mot de passe") + + show_pages( + [ + Page("home.py", "Home", "🏠 "), + Page("pages/register.py", "S'enregistrer", "🚀"), + ], + ) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 6319f98..f0a9d10 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -19,18 +19,10 @@ from folium.plugins import MarkerCluster from streamlit_folium import folium_static, st_folium -###################### -# Page configuration # -###################### -st.set_page_config( - page_title="Hotspots", - layout="wide", - initial_sidebar_state="expanded" - ) -###################################### -# 0/ Parameters for the hotspots tab # -###################################### +################################### +# Parameters for the hotspots tab # +################################### # Data path for the df_nb_dechets NB_DECHETS_PATH = ( @@ -71,33 +63,65 @@ ADOPTED_SPOTS_FILTERS_PARAMS = [ { "filter_col": "REGION", - "filter_message": "Sélectionnez une région (par défaut votre région) :" - }, - { - "filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :" - }, - { - "filter_col":"ANNEE", "filter_message": "Sélectionnez une année :" - } + "filter_message": "Sélectionnez une région (par défaut votre région) :", + }, + {"filter_col": "TYPE_MILIEU", 
"filter_message": "Sélectionnez un milieu :"}, + {"filter_col": "ANNEE", "filter_message": "Sélectionnez une année :"}, ] # Params for the density graph filters DENSITY_FILTERS_PARAMS = [ { "filter_col": "REGION", - "filter_message": "Sélectionnez une région (par défaut votre région) :" - }, - { - "filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :" - }, - { - "filter_col": "TYPE_LIEU2", "filter_message": "Sélectionnez un lieu :" - }, - { - "filter_col":"ANNEE", "filter_message": "Sélectionnez une année :" - } + "filter_message": "Sélectionnez une région (par défaut votre région) :", + }, + {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, + {"filter_col": "TYPE_LIEU2", "filter_message": "Sélectionnez un lieu :"}, + {"filter_col": "ANNEE", "filter_message": "Sélectionnez une année :"}, ] + +######################### +# 0/ Page configuration # +######################### + +# Session state +session_state = st.session_state + +# Récupérer les filtres géographiques s'ils ont été fixés +filtre_niveau = st.session_state.get("niveau_admin", "") +filtre_collectivite = st.session_state.get("collectivite", "") + +# Set the streamlit page config +st.set_page_config( + page_title="Hotspots", layout="wide", initial_sidebar_state="expanded" +) + +# Execute code page if the authentication was complete +if st.session_state["authentication_status"]: + # Check if the filtre for "niveau administratif" and for the "collectivité" were selected in the home tab + if filtre_niveau == "" and filtre_collectivite == "": + st.write("Aucune sélection de territoire n'a été effectuée") + + else: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") + + # Call the dataframes data_zds and nb_dechets filtered from the session state + if ("df_other_filtre" not in st.session_state) or ( + "df_nb_dechets_filtre" not in st.session_state + ): + st.write( + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home 
pour afficher les données. :warning: + """ + ) + st.stop() + else: + data_zds = st.session_state["df_other_filtre"].copy() + df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() + + ########################################################################### # 0 bis/ Fonctions utilitaires : peuvent être utilisées par tout le monde # ########################################################################### @@ -120,13 +144,17 @@ def construct_query_string(bound_word=" and ", **params) -> str: # Check if the parameter value is of type int if isinstance(param, int): # If it's an integer, use integer comparison - query_sub_string = f'{param_key} == {param}' + query_sub_string = f"{param_key} == {param}" # Check if the parameter value is a list. elif isinstance(param, list): # Handle list of values for multiselect queries. - param_values = ', '.join([f'"{value}"' for value in param]) # Prepare string of values enclosed in quotes. - query_sub_string = f'{param_key} in [{param_values}]' # Use 'in' operator for lists. + param_values = ", ".join( + [f'"{value}"' for value in param] + ) # Prepare string of values enclosed in quotes. + query_sub_string = ( + f"{param_key} in [{param_values}]" # Use 'in' operator for lists. + ) else: # Create a query sub-string for other data types @@ -142,7 +170,7 @@ def construct_query_string(bound_word=" and ", **params) -> str: def scalable_filters_single_select( data_zds: pd.DataFrame, filters_params=ADOPTED_SPOTS_FILTERS_PARAMS, - base_key="default_key" + base_key="default_key", ) -> dict: """Create streamlit select box filters as specified by the filters_params list. 
Create the filter dict used to filter the hotspots maps accordingly.""" @@ -175,10 +203,11 @@ def scalable_filters_single_select( return filter_dict + def scalable_filters_multi_select( data_zds: pd.DataFrame, filters_params=DENSITY_FILTERS_PARAMS, - base_key="default_key" + base_key="default_key", ) -> dict: """Create streamlit select box filters as specified by the filters_params list. Create the filter dict used to filter the hotspots maps accordingly.""" @@ -195,21 +224,27 @@ def scalable_filters_multi_select( column, message = filter_params["filter_col"], filter_params["filter_message"] # Get unique values, convert to string and sort them - sorted_values = sorted(data_zds[column].dropna().astype(str).unique(), reverse=True) + sorted_values = sorted( + data_zds[column].dropna().astype(str).unique(), reverse=True + ) # Generate a unique key for each multiselect widget unique_key = f"{base_key}_{column}_{i}" # Create the Streamlit multiselect with sorted values and a unique key - selected_values = columns[i].multiselect(message, sorted_values, - default=sorted_values[0] if sorted_values else [], - key=unique_key) + selected_values = columns[i].multiselect( + message, + sorted_values, + default=sorted_values[0] if sorted_values else [], + key=unique_key, + ) # Fill the filter dict with the selected values filter_dict[column] = selected_values return filter_dict + ################## # 1/ Import data # ################## @@ -218,21 +253,22 @@ def scalable_filters_multi_select( regions = gpd.read_file(REGION_GEOJSON_PATH) # nb dechets : Unused for now -df_nb_dechets = pd.read_csv(NB_DECHETS_PATH) +# df_nb_dechets = pd.read_csv(NB_DECHETS_PATH) # data_zds : main source of data for the hotspots tab -data_zds = pd.read_csv(DATA_ZDS_PATH) +# /!\ Already loaded from the streamlit session state defined in the home tab +# data_zds = pd.read_csv(DATA_ZDS_PATH) # spot: -#spot = pd.read_excel(DATA_SPOT) +# spot = pd.read_excel(DATA_SPOT) # correction : corrected data for 
density map correction = pd.read_excel(CORRECTION) # Fusion and correction -data_correct = pd.merge(data_zds, correction, on='ID_RELEVE', how='left') -data_correct = data_correct[data_correct['SURFACE_OK'] == 'OUI'] -data_zds = data_correct[data_correct['VOLUME_TOTAL'] > 0] +data_correct = pd.merge(data_zds, correction, on="ID_RELEVE", how="left") +data_correct = data_correct[data_correct["SURFACE_OK"] == "OUI"] +data_zds = data_correct[data_correct["VOLUME_TOTAL"] > 0] ################## # 2/ Hotspot tab # @@ -252,7 +288,6 @@ def scalable_filters_multi_select( #################################################################################### - def density_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): """ Calculate and display the density of waste by type of location ('LIEU') for a selected region. @@ -263,37 +298,47 @@ def density_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): if selected_regions is not None: # Filter data for selected region - data_selected_region = data_zds[data_zds['LIEU_REGION'].isin(selected_regions)] + data_selected_region = data_zds[data_zds["LIEU_REGION"].isin(selected_regions)] # Calculate waste volume sum for each 'LIEU' - volume_total_lieu = data_selected_region.groupby('TYPE_LIEU2')['VOLUME_TOTAL'].sum().reset_index() + volume_total_lieu = ( + data_selected_region.groupby("TYPE_LIEU2")["VOLUME_TOTAL"] + .sum() + .reset_index() + ) # Remove duplicate data and calculate SURFACE total - data_unique = data_selected_region.drop_duplicates(subset=['LIEU_COORD_GPS']) - surface_total_lieu = data_unique.groupby('TYPE_LIEU2')['SURFACE'].sum().reset_index() + data_unique = data_selected_region.drop_duplicates(subset=["LIEU_COORD_GPS"]) + surface_total_lieu = ( + data_unique.groupby("TYPE_LIEU2")["SURFACE"].sum().reset_index() + ) # Merge volume and surface data for 'LIEU', calculate density, and sort - data_lieu = pd.merge(volume_total_lieu, surface_total_lieu, on='TYPE_LIEU2') - data_lieu['DENSITE_LIEU'] = 
(data_lieu['VOLUME_TOTAL'] / data_lieu['SURFACE']).round(5) + data_lieu = pd.merge(volume_total_lieu, surface_total_lieu, on="TYPE_LIEU2") + data_lieu["DENSITE_LIEU"] = ( + data_lieu["VOLUME_TOTAL"] / data_lieu["SURFACE"] + ).round(5) data_lieu_sorted = data_lieu.sort_values(by="DENSITE_LIEU", ascending=False) # Display sorted DataFrame with specific configuration for 'data_lieu_sorted' - lieu = st.markdown('##### Densité des déchets par type de lieu (L/m2)') - st.dataframe(data_lieu_sorted, - column_order=("TYPE_LIEU2", "DENSITE_LIEU"), - hide_index=True, - width=None, - column_config={ - "TYPE_LIEU2": st.column_config.TextColumn( - "Lieu", - ), - "DENSITE_LIEU": st.column_config.ProgressColumn( - "Densité", - format="%f", - min_value=0, - max_value=max(data_lieu_sorted['DENSITE_LIEU']), - )} - ) + lieu = st.markdown("##### Densité des déchets par type de lieu (L/m2)") + st.dataframe( + data_lieu_sorted, + column_order=("TYPE_LIEU2", "DENSITE_LIEU"), + hide_index=True, + width=None, + column_config={ + "TYPE_LIEU2": st.column_config.TextColumn( + "Lieu", + ), + "DENSITE_LIEU": st.column_config.ProgressColumn( + "Densité", + format="%f", + min_value=0, + max_value=max(data_lieu_sorted["DENSITE_LIEU"]), + ), + }, + ) return lieu @@ -307,37 +352,51 @@ def density_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): if selected_regions is not None: # Filter data for selected region - data_selected_region = data_zds[data_zds['LIEU_REGION'].isin(selected_regions)] + data_selected_region = data_zds[data_zds["LIEU_REGION"].isin(selected_regions)] # Calculate waste volume sum for each 'MILIEU' - volume_total_milieu = data_selected_region.groupby('TYPE_MILIEU')['VOLUME_TOTAL'].sum().reset_index() + volume_total_milieu = ( + data_selected_region.groupby("TYPE_MILIEU")["VOLUME_TOTAL"] + .sum() + .reset_index() + ) # Remove duplicate data and calculate SURFACE total - data_unique = data_selected_region.drop_duplicates(subset=['LIEU_COORD_GPS']) - surface_total_milieu = 
data_unique.groupby('TYPE_MILIEU')['SURFACE'].sum().reset_index() + data_unique = data_selected_region.drop_duplicates(subset=["LIEU_COORD_GPS"]) + surface_total_milieu = ( + data_unique.groupby("TYPE_MILIEU")["SURFACE"].sum().reset_index() + ) # Merge volume and surface data for 'MILIEU', calculate density, and sort - data_milieu = pd.merge(volume_total_milieu, surface_total_milieu, on='TYPE_MILIEU') - data_milieu['DENSITE_MILIEU'] = (data_milieu['VOLUME_TOTAL'] / data_milieu['SURFACE']).round(5) - data_milieu_sorted = data_milieu.sort_values(by="DENSITE_MILIEU", ascending=False) + data_milieu = pd.merge( + volume_total_milieu, surface_total_milieu, on="TYPE_MILIEU" + ) + data_milieu["DENSITE_MILIEU"] = ( + data_milieu["VOLUME_TOTAL"] / data_milieu["SURFACE"] + ).round(5) + data_milieu_sorted = data_milieu.sort_values( + by="DENSITE_MILIEU", ascending=False + ) # Display sorted DataFrame with specific configuration for 'data_milieu_sorted' - milieu = st.markdown('##### Densité des déchets par type de milieu (L/m2)') - st.dataframe(data_milieu_sorted, - column_order=("TYPE_MILIEU", "DENSITE_MILIEU"), - hide_index=True, - width=None, - column_config={ - "TYPE_MILIEU": st.column_config.TextColumn( - "Milieu", - ), - "DENSITE_MILIEU": st.column_config.ProgressColumn( - "Densité", - format="%f", - min_value=0, - max_value=max(data_milieu_sorted['DENSITE_MILIEU']), - )} - ) + milieu = st.markdown("##### Densité des déchets par type de milieu (L/m2)") + st.dataframe( + data_milieu_sorted, + column_order=("TYPE_MILIEU", "DENSITE_MILIEU"), + hide_index=True, + width=None, + column_config={ + "TYPE_MILIEU": st.column_config.TextColumn( + "Milieu", + ), + "DENSITE_MILIEU": st.column_config.ProgressColumn( + "Densité", + format="%f", + min_value=0, + max_value=max(data_milieu_sorted["DENSITE_MILIEU"]), + ), + }, + ) return milieu @@ -346,62 +405,76 @@ def density_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): # 2.3/ Carte choropleth densité de déchets en France # 
###################################################### + def make_density_choropleth(data_zds, region_geojson_path): # Load all regions from the GeoJSON file regions_geojson = requests.get(region_geojson_path).json() # Extract region names from GeoJSON for later comparison - regions_from_geojson = [feature['properties']['nom'] for feature in regions_geojson['features']] + regions_from_geojson = [ + feature["properties"]["nom"] for feature in regions_geojson["features"] + ] # Create a DataFrame from the GeoJSON region names - regions_df = pd.DataFrame(regions_from_geojson, columns=['nom']) + regions_df = pd.DataFrame(regions_from_geojson, columns=["nom"]) # Data preparation # Calculate the total VOLUME_TOTAL for each region without removing duplicate data - volume_total_sums = data_zds.groupby('LIEU_REGION')['VOLUME_TOTAL'].sum().reset_index() + volume_total_sums = ( + data_zds.groupby("LIEU_REGION")["VOLUME_TOTAL"].sum().reset_index() + ) # Merge the waste data and the geographical data - volume_total_sums = pd.merge(regions_df, volume_total_sums, left_on='nom', right_on='LIEU_REGION', how='left') + volume_total_sums = pd.merge( + regions_df, volume_total_sums, left_on="nom", right_on="LIEU_REGION", how="left" + ) # Identify regions with no available data - regions_no_data = volume_total_sums[volume_total_sums['VOLUME_TOTAL'].isna()]['nom'].tolist() + regions_no_data = volume_total_sums[volume_total_sums["VOLUME_TOTAL"].isna()][ + "nom" + ].tolist() if regions_no_data: - st.info(f"Aucune donnée disponible pour les régions suivantes : {', '.join(regions_no_data)}", icon="⚠️") + st.info( + f"Aucune donnée disponible pour les régions suivantes : {', '.join(regions_no_data)}", + icon="⚠️", + ) # Drop rows containing NaN to avoid errors in the choropleth volume_total_sums.dropna(inplace=True) # Remove duplicate data and calculate SURFACE total - data_unique = data_zds.drop_duplicates(subset=['LIEU_COORD_GPS']) - surface_total_sums = 
data_unique.groupby('LIEU_REGION')['SURFACE'].sum().reset_index() + data_unique = data_zds.drop_duplicates(subset=["LIEU_COORD_GPS"]) + surface_total_sums = ( + data_unique.groupby("LIEU_REGION")["SURFACE"].sum().reset_index() + ) # Combine two datasets and calculate DENSITE - data_choropleth_sums = pd.merge(volume_total_sums, surface_total_sums, on='LIEU_REGION') - data_choropleth_sums['DENSITE'] = data_choropleth_sums['VOLUME_TOTAL'] / data_choropleth_sums['SURFACE'] + data_choropleth_sums = pd.merge( + volume_total_sums, surface_total_sums, on="LIEU_REGION" + ) + data_choropleth_sums["DENSITE"] = ( + data_choropleth_sums["VOLUME_TOTAL"] / data_choropleth_sums["SURFACE"] + ) # Set bins for the choropleth - min_density = data_choropleth_sums['DENSITE'].min() - max_density = data_choropleth_sums['DENSITE'].max() + min_density = data_choropleth_sums["DENSITE"].min() + max_density = data_choropleth_sums["DENSITE"].max() # Create the choropleth map using Plotly Express choropleth = px.choropleth( data_choropleth_sums, geojson=regions_geojson, featureidkey="properties.nom", - locations='LIEU_REGION', - color='DENSITE', - color_continuous_scale='Reds', - range_color=(min_density, max_density), # set range using log scale - labels={'DENSITE': 'Densité de Déchets(L/m2)'} + locations="LIEU_REGION", + color="DENSITE", + color_continuous_scale="Reds", + range_color=(min_density, max_density), # set range using log scale + labels={"DENSITE": "Densité de Déchets(L/m2)"}, ) # Update layout to fit the map to the boundaries of the GeoJSON choropleth.update_layout( - geo=dict( - fitbounds="locations", - visible=False - ), - margin=dict(l=0, r=0, t=0, b=0) + geo=dict(fitbounds="locations", visible=False), margin=dict(l=0, r=0, t=0, b=0) ) return choropleth @@ -412,6 +485,7 @@ def make_density_choropleth(data_zds, region_geojson_path): # par lieu et par milieu au fil des années spots adoptés # ############################################################ + def 
line_chart_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): # Get the selected region and milieu from the filter dictionary selected_regions = multi_filter_dict.get("REGION", []) @@ -423,13 +497,19 @@ def line_chart_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): return # Filter data for the selected region - data_selected_region = data_zds[data_zds['LIEU_REGION'].isin(selected_regions)] + data_selected_region = data_zds[data_zds["LIEU_REGION"].isin(selected_regions)] if data_selected_region.empty: - st.warning(f"Aucune donnée disponible pour la région sélectionnée : {selected_regions}") + st.warning( + f"Aucune donnée disponible pour la région sélectionnée : {selected_regions}" + ) return # Further filter data for the selected milieus - data_selected_lieu = data_selected_region[data_selected_region['TYPE_LIEU2'].isin(selected_lieu)] if selected_lieu else data_selected_region + data_selected_lieu = ( + data_selected_region[data_selected_region["TYPE_LIEU2"].isin(selected_lieu)] + if selected_lieu + else data_selected_region + ) # Check if there is any data left after filtering by milieu if data_selected_lieu.empty: @@ -437,37 +517,57 @@ def line_chart_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): return # Calculate waste volume sum for each 'LIEU' by 'ANNEE' - volume_total_annee = data_selected_lieu.groupby(['TYPE_LIEU2', 'ANNEE'])['VOLUME_TOTAL'].sum().reset_index() + volume_total_annee = ( + data_selected_lieu.groupby(["TYPE_LIEU2", "ANNEE"])["VOLUME_TOTAL"] + .sum() + .reset_index() + ) # Remove duplicate data and calculate SURFACE total - data_unique = data_selected_lieu.drop_duplicates(subset=['LIEU_COORD_GPS']) - surface_total_annee = data_unique.groupby(['TYPE_LIEU2', 'ANNEE'])['SURFACE'].sum().reset_index() + data_unique = data_selected_lieu.drop_duplicates(subset=["LIEU_COORD_GPS"]) + surface_total_annee = ( + data_unique.groupby(["TYPE_LIEU2", "ANNEE"])["SURFACE"].sum().reset_index() + ) # Merge volume and surface data for 'MILIEU', 
calculate density, and sort - data_lieu = pd.merge(volume_total_annee, surface_total_annee, on=['TYPE_LIEU2', 'ANNEE']) + data_lieu = pd.merge( + volume_total_annee, surface_total_annee, on=["TYPE_LIEU2", "ANNEE"] + ) if data_lieu.empty: - st.warning("Aucune donnée superposée pour les calculs de volume et de surface pour les lieux sélectionnés.") + st.warning( + "Aucune donnée superposée pour les calculs de volume et de surface pour les lieux sélectionnés." + ) return - data_lieu['DENSITE_LIEU'] = (data_lieu['VOLUME_TOTAL'] / data_lieu['SURFACE']).round(5) - data_lieu_sorted = data_lieu.sort_values(by='ANNEE', ascending=False) + data_lieu["DENSITE_LIEU"] = ( + data_lieu["VOLUME_TOTAL"] / data_lieu["SURFACE"] + ).round(5) + data_lieu_sorted = data_lieu.sort_values(by="ANNEE", ascending=False) # Create the plot fig = go.Figure() - for type_lieu in data_lieu_sorted['TYPE_LIEU2'].unique(): - df_plot = data_lieu_sorted[data_lieu_sorted['TYPE_LIEU2'] == type_lieu] - fig.add_trace(go.Scatter(x=df_plot["ANNEE"], y=df_plot['DENSITE_LIEU'], mode='lines+markers', name=type_lieu)) + for type_lieu in data_lieu_sorted["TYPE_LIEU2"].unique(): + df_plot = data_lieu_sorted[data_lieu_sorted["TYPE_LIEU2"] == type_lieu] + fig.add_trace( + go.Scatter( + x=df_plot["ANNEE"], + y=df_plot["DENSITE_LIEU"], + mode="lines+markers", + name=type_lieu, + ) + ) # Update plot layout fig.update_layout( title=f"Densité des déchets par type de lieu", xaxis_title="Année", yaxis_title="Densité L/m2", - legend_title="Type de lieu" + legend_title="Type de lieu", ) st.plotly_chart(fig) + def line_chart_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): # Get the selected region and milieu from the filter dictionary selected_regions = multi_filter_dict.get("REGION", []) @@ -479,13 +579,19 @@ def line_chart_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): return # Filter data for the selected region - data_selected_region = data_zds[data_zds['LIEU_REGION'].isin(selected_regions)] + 
data_selected_region = data_zds[data_zds["LIEU_REGION"].isin(selected_regions)] if data_selected_region.empty: - st.warning(f"Aucune donnée disponible pour la région sélectionnée : {selected_regions}") + st.warning( + f"Aucune donnée disponible pour la région sélectionnée : {selected_regions}" + ) return # Further filter data for the selected milieus - data_selected_milieu = data_selected_region[data_selected_region['TYPE_MILIEU'].isin(selected_milieu)] if selected_milieu else data_selected_region + data_selected_milieu = ( + data_selected_region[data_selected_region["TYPE_MILIEU"].isin(selected_milieu)] + if selected_milieu + else data_selected_region + ) # Check if there is any data left after filtering by milieu if data_selected_milieu.empty: @@ -493,39 +599,57 @@ def line_chart_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): return # Calculate waste volume sum for each 'MILIEU' by 'ANNEE' - volume_total_annee = data_selected_milieu.groupby(['TYPE_MILIEU', 'ANNEE'])['VOLUME_TOTAL'].sum().reset_index() + volume_total_annee = ( + data_selected_milieu.groupby(["TYPE_MILIEU", "ANNEE"])["VOLUME_TOTAL"] + .sum() + .reset_index() + ) # Remove duplicate data and calculate SURFACE total - data_unique = data_selected_milieu.drop_duplicates(subset=['LIEU_COORD_GPS']) - surface_total_annee = data_unique.groupby(['TYPE_MILIEU', 'ANNEE'])['SURFACE'].sum().reset_index() + data_unique = data_selected_milieu.drop_duplicates(subset=["LIEU_COORD_GPS"]) + surface_total_annee = ( + data_unique.groupby(["TYPE_MILIEU", "ANNEE"])["SURFACE"].sum().reset_index() + ) # Merge volume and surface data for 'MILIEU', calculate density, and sort - data_milieu = pd.merge(volume_total_annee, surface_total_annee, on=['TYPE_MILIEU', 'ANNEE']) + data_milieu = pd.merge( + volume_total_annee, surface_total_annee, on=["TYPE_MILIEU", "ANNEE"] + ) if data_milieu.empty: - st.warning("Aucune donnée superposée pour les calculs de volume et de surface pour les milieux sélectionnés.") + st.warning( + 
"Aucune donnée superposée pour les calculs de volume et de surface pour les milieux sélectionnés." + ) return - data_milieu['DENSITE_MILIEU'] = (data_milieu['VOLUME_TOTAL'] / data_milieu['SURFACE']).round(5) - data_milieu_sorted = data_milieu.sort_values(by='ANNEE', ascending=False) + data_milieu["DENSITE_MILIEU"] = ( + data_milieu["VOLUME_TOTAL"] / data_milieu["SURFACE"] + ).round(5) + data_milieu_sorted = data_milieu.sort_values(by="ANNEE", ascending=False) # Create the plot fig = go.Figure() - for type_milieu in data_milieu_sorted['TYPE_MILIEU'].unique(): - df_plot = data_milieu_sorted[data_milieu_sorted['TYPE_MILIEU'] == type_milieu] - fig.add_trace(go.Scatter(x=df_plot["ANNEE"], y=df_plot['DENSITE_MILIEU'], mode='lines+markers', name=type_milieu)) + for type_milieu in data_milieu_sorted["TYPE_MILIEU"].unique(): + df_plot = data_milieu_sorted[data_milieu_sorted["TYPE_MILIEU"] == type_milieu] + fig.add_trace( + go.Scatter( + x=df_plot["ANNEE"], + y=df_plot["DENSITE_MILIEU"], + mode="lines+markers", + name=type_milieu, + ) + ) # Update plot layout fig.update_layout( title=f"Densité des déchets par type de milieu", xaxis_title="Année", yaxis_title="Densité L/m2", - legend_title="Type de milieu" + legend_title="Type de milieu", ) st.plotly_chart(fig) - ################################ # 2.1/ Carte des spots adoptés # ################################ @@ -626,26 +750,32 @@ def plot_adopted_waste_spots( return m - ######################## # Dashboard Main Panel # ######################## - -tab1, tab2, tab3, tab4 = st.tabs(["Densité des déchets dans zone étudié", - "Évolution de la densité au fil du temps", - "Spots Adoptés", - "Aperçu à travers la France"]) +tab1, tab2, tab3, tab4 = st.tabs( + [ + "Densité des déchets dans zone étudié", + "Évolution de la densité au fil du temps", + "Spots Adoptés", + "Aperçu à travers la France", + ] +) with tab1: # Select only the filters for 'REGION' and 'ANNEE' - selected_filters_1 = [f for f in DENSITY_FILTERS_PARAMS if 
f["filter_col"] in ["REGION", "ANNEE"]] + selected_filters_1 = [ + f for f in DENSITY_FILTERS_PARAMS if f["filter_col"] in ["REGION", "ANNEE"] + ] # Use the selected filters for multi-select - multi_filter_dict_1 = scalable_filters_multi_select(data_zds, selected_filters_1, tab1) + multi_filter_dict_1 = scalable_filters_multi_select( + data_zds, selected_filters_1, tab1 + ) - col = st.columns((4, 4, 2), gap='medium') + col = st.columns((4, 4, 2), gap="medium") # Construct the map with col[0]: @@ -655,19 +785,27 @@ def plot_adopted_waste_spots( density_milieu(data_zds, multi_filter_dict_1) with col[2]: - with st.expander('Notice ℹ️', expanded=True): - st.write(''' + with st.expander("Notice ℹ️", expanded=True): + st.write( + """ Explication des diffférences entre Lieu et Milieu - ''') + """ + ) with tab2: # Select only the filters for 'REGION' and 'ANNEE' - selected_filters_2 = [f for f in DENSITY_FILTERS_PARAMS if f["filter_col"] in ["REGION", "TYPE_LIEU2", "TYPE_MILIEU"]] + selected_filters_2 = [ + f + for f in DENSITY_FILTERS_PARAMS + if f["filter_col"] in ["REGION", "TYPE_LIEU2", "TYPE_MILIEU"] + ] # Use the selected filters for multi-select - multi_filter_dict_2 = scalable_filters_multi_select(data_zds, selected_filters_2, tab2) + multi_filter_dict_2 = scalable_filters_multi_select( + data_zds, selected_filters_2, tab2 + ) - col = st.columns((7, 7), gap='medium') + col = st.columns((7, 7), gap="medium") with col[0]: line_chart_lieu(data_zds, multi_filter_dict_2) @@ -677,15 +815,17 @@ def plot_adopted_waste_spots( with tab3: # Use the selected filters - single_filter_dict_3 = scalable_filters_single_select(data_zds, ADOPTED_SPOTS_FILTERS_PARAMS, tab3) + single_filter_dict_3 = scalable_filters_single_select( + data_zds, ADOPTED_SPOTS_FILTERS_PARAMS, tab3 + ) - st.markdown('### Spots Adoptés') + st.markdown("### Spots Adoptés") m = plot_adopted_waste_spots(data_zds, single_filter_dict_3, REGION_GEOJSON_PATH) # Show the adopted spots map on the streamlit tab if m: 
folium_static(m) with tab4: - st.markdown('### Densité des déchets en France') + st.markdown("### Densité des déchets en France") choropleth = make_density_choropleth(data_zds, REGION_GEOJSON_PATH) st.plotly_chart(choropleth, use_container_width=True) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index e75f8b9..2060a18 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -3,5 +3,8 @@ geopandas==0.14.3 folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 +streamlit-authenticator==0.3.2 +st-pages==0.4.5 streamlit-folium==0.19.1 plotly==5.19.0 +openpyxl==3.1.2 From 22c9624d5e5a491b659cfc92a2898c272f7f966f Mon Sep 17 00:00:00 2001 From: DridrM Date: Mon, 22 Apr 2024 11:11:05 +0200 Subject: [PATCH 063/147] Add style.css --- .gitignore | 5 ++++- dashboards/app/style.css | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 dashboards/app/style.css diff --git a/.gitignore b/.gitignore index b8fb0eb..b269410 100644 --- a/.gitignore +++ b/.gitignore @@ -160,4 +160,7 @@ dmypy.json cython_debug/ # Precommit hooks: ruff cache -.ruff_cache \ No newline at end of file +.ruff_cache + +# Streamlit: credentials +dashboards/app/.credentials.yml diff --git a/dashboards/app/style.css b/dashboards/app/style.css new file mode 100644 index 0000000..3fb0486 --- /dev/null +++ b/dashboards/app/style.css @@ -0,0 +1,13 @@ +@import url('https://fonts.googleapis.com/css2?family=Montserrat:wght@500;700&display=swap'); + +/* GLOBAL FONT CHANGE */ +html, body, [class*="css"] { + font-family: 'Montserrat', sans-serif; +} + + +/* Sidebar color change */ +[data-testid="stSidebar"] { + background-color: #003463 !important; + color: #FFFFFF !important; +} From a83defa1fca892f5c19916cce9ff64232da234fd Mon Sep 17 00:00:00 2001 From: DridrM Date: Mon, 22 Apr 2024 11:23:36 +0200 Subject: [PATCH 064/147] Add register.py and functionnal hotspots tab --- dashboards/app/pages/register.py | 47 
++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 dashboards/app/pages/register.py diff --git a/dashboards/app/pages/register.py b/dashboards/app/pages/register.py new file mode 100644 index 0000000..be54cb4 --- /dev/null +++ b/dashboards/app/pages/register.py @@ -0,0 +1,47 @@ +from pathlib import Path +import yaml +from yaml.loader import SafeLoader +import streamlit as st +import streamlit_authenticator as stauth + +st.markdown( + """ +# Bienvenue 👋 +#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! +""", +) + +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) + +try: + ( + email_of_registered_user, + username_of_registered_user, + name_of_registered_user, + ) = authenticator.register_user( + pre_authorization=False, + fields={ + "Form name": "S'enregistrer", + "Email": "Email", + "Username": "Identifiant", + "Password": "Mot de passe", + "Repeat password": "Répeter le mot de passe", + "Register": "S'enregistrer", + }, + ) + if email_of_registered_user: + with open(".credentials.yml", "w") as file: + yaml.dump(config, file, default_flow_style=False) + st.success("Utilisateur enregistré") +except Exception as e: + st.error(e) From 8ca713d556ae796d0032897006b8489d323e5f37 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Sat, 20 Apr 2024 13:10:25 -0400 Subject: [PATCH 065/147] =?UTF-8?q?[kb]=20=F0=9F=9A=9A=20Move=20creds=20to?= =?UTF-8?q?=20app?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/.credentials-dev.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 dashboards/app/.credentials-dev.yml diff --git a/dashboards/app/.credentials-dev.yml 
b/dashboards/app/.credentials-dev.yml new file mode 100644 index 0000000..716cedd --- /dev/null +++ b/dashboards/app/.credentials-dev.yml @@ -0,0 +1,14 @@ +cookie: + expiry_days: 30 + key: some_signature_key + name: some_cookie_name +credentials: + usernames: + test: + email: test@test.com + logged_in: false + name: test + password: $2b$12$fR4sp7tIG.dbeusbr695MOw/xvN1sf.21rML7t7j9pCdIVREIocUO +pre-authorized: + emails: + - test@test.com From 6ad38f458f0eae810fcb8e6d92f68aebe7b003c1 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Sat, 20 Apr 2024 13:27:05 -0400 Subject: [PATCH 066/147] =?UTF-8?q?[kb]=20=F0=9F=99=88=20Add=20streamlit?= =?UTF-8?q?=20credentials?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 67bb6de..55b396b 100644 --- a/.gitignore +++ b/.gitignore @@ -163,4 +163,7 @@ cython_debug/ .ruff_cache # Dossier sauvegarde Thibaut -TG_sauv \ No newline at end of file +TG_sauv + +# Streamlit: credentials +dashboards/app/.credentials.yml From 0fcddc4a30ba05cc471f44a12187f7ae38b78a1d Mon Sep 17 00:00:00 2001 From: DridrM Date: Mon, 22 Apr 2024 20:09:20 +0200 Subject: [PATCH 067/147] Update the filters_params, delete the region filter --- dashboards/app/pages/hotspots.py | 55 +++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index f0a9d10..7908627 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -24,6 +24,24 @@ # Parameters for the hotspots tab # ################################### +# This dict map the name of the "niveaux admin" as define in the home tab and the +# column name in the data_zds df. The "niveaux_admin" selection is store in the session state. 
+NIVEAUX_ADMIN_DICT = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", +} + +# The name of the "niveau_admin" fetch from the session state +NIVEAU_ADMIN = st.session_state["niveau_admin"] + +# The name of the "niveau_admin" column in the data_zds df +NIVEAU_ADMIN_COL = NIVEAUX_ADMIN_DICT[NIVEAU_ADMIN] + +# The value selected for the "niveau_admin" column fetch from the session state +NIVEAU_ADMIN_SELECTION = st.session_state["collectivite"] + # Data path for the df_nb_dechets NB_DECHETS_PATH = ( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" @@ -61,20 +79,12 @@ # Params for the adopted spots map filters ADOPTED_SPOTS_FILTERS_PARAMS = [ - { - "filter_col": "REGION", - "filter_message": "Sélectionnez une région (par défaut votre région) :", - }, {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, {"filter_col": "ANNEE", "filter_message": "Sélectionnez une année :"}, ] # Params for the density graph filters DENSITY_FILTERS_PARAMS = [ - { - "filter_col": "REGION", - "filter_message": "Sélectionnez une région (par défaut votre région) :", - }, {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, {"filter_col": "TYPE_LIEU2", "filter_message": "Sélectionnez un lieu :"}, {"filter_col": "ANNEE", "filter_message": "Sélectionnez une année :"}, @@ -139,7 +149,7 @@ def construct_query_string(bound_word=" and ", **params) -> str: # Iterate over the params to construct the query string for param_key, param in params.items(): # Construct the param sub string if the param is not 'None' - if param is not None: + if param: # Check if the parameter value is of type int if isinstance(param, int): @@ -245,6 +255,30 @@ def scalable_filters_multi_select( return filter_dict +def construct_admin_lvl_boundaries( + admin_lvl: str, single_filter_dict: dict, admin_lvl_geojson_path_dict: dict +) -> any: + """""" + + # Unpack the admin level geojson path + 
admin_lvl_geojson_path = admin_lvl_geojson_path_dict[f"{admin_lvl}"] + + # Unpack the region name + admin_lvl_name = single_filter_dict[f"{admin_lvl}"] + + # Load France regions from a GeoJSON file + admin_lvl_shapes = gpd.read_file(admin_lvl_geojson_path) + + # Filter the region geodataframe for the specified region + selected_admin_lvl = admin_lvl_shapes[ + admin_lvl_shapes["nom"].str.lower() == admin_lvl_name.lower() + ] + if selected_admin_lvl.empty: + raise KeyError(f"Administrative level '{admin_lvl_name}' not found.") + + return selected_admin_lvl + + ################## # 1/ Import data # ################## @@ -666,7 +700,6 @@ def plot_adopted_waste_spots( - data_zds: The waste dataframe - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by """ - print("Filter Dictionary:", single_filter_dict) # Check the filter dictionary # 1/ Create the waste geodataframe # # Create a GeoDataFrame for waste points @@ -684,7 +717,6 @@ def plot_adopted_waste_spots( # Construct the query string query_string = construct_query_string(**single_filter_dict) - print("Query String:", query_string) # Check the constructed query string # Filter the geodataframe by region and by environment gdf_filtered = gdf.query(query_string) @@ -695,7 +727,6 @@ def plot_adopted_waste_spots( # Load France regions from a GeoJSON file regions = gpd.read_file(region_geojson_path) - regions = regions.loc[regions["nom"] == region, :] # Filter the region geodataframe for the specified region selected_region = regions[regions["nom"].str.lower() == region.lower()] From b07798869824dd29e86b320c007b364809b8229a Mon Sep 17 00:00:00 2001 From: linh dinh Date: Tue, 23 Apr 2024 10:38:54 +0200 Subject: [PATCH 068/147] =?UTF-8?q?Ajoute=20carte=20densit=C3=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/hotspots.py | 544 ++++++++----------------------- 1 file changed, 130 insertions(+), 414 
deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 7908627..95f0d48 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -58,9 +58,9 @@ # Data path for the France regions geojson REGION_GEOJSON_PATH = ( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/regions" - "-avec-outre-mer.geojson" + "https://raw.githubusercontent.com/dataforgoodfr/" + "12_zero_dechet_sauvage/1-exploration-des-donn%C3%A9es/" + "Exploration_visualisation/data/regions-avec-outre-mer.geojson" ) # Data path for Correction @@ -79,6 +79,10 @@ # Params for the adopted spots map filters ADOPTED_SPOTS_FILTERS_PARAMS = [ + { + "filter_col": "REGION", + "filter_message": "Sélectionnez une région (par défaut votre région) :", + }, {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, {"filter_col": "ANNEE", "filter_message": "Sélectionnez une année :"}, ] @@ -86,8 +90,7 @@ # Params for the density graph filters DENSITY_FILTERS_PARAMS = [ {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, - {"filter_col": "TYPE_LIEU2", "filter_message": "Sélectionnez un lieu :"}, - {"filter_col": "ANNEE", "filter_message": "Sélectionnez une année :"}, + {"filter_col": "TYPE_LIEU2", "filter_message": "Sélectionnez un lieu :"} ] @@ -315,373 +318,121 @@ def construct_admin_lvl_boundaries( ######################################################## # 2.1/ Carte densité de déchets sur les zones étudiées # ######################################################## -# à faire! 
- -#################################################################################### -# 2.2/ Tableaux de la densité par milieu et lieu de déchets sur les zones étudiées # -#################################################################################### - - -def density_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): - """ - Calculate and display the density of waste by type of location ('LIEU') for a selected region. - """ - - # Get the selected region from filter_dict - selected_regions = multi_filter_dict.get("REGION", []) - - if selected_regions is not None: - # Filter data for selected region - data_selected_region = data_zds[data_zds["LIEU_REGION"].isin(selected_regions)] - - # Calculate waste volume sum for each 'LIEU' - volume_total_lieu = ( - data_selected_region.groupby("TYPE_LIEU2")["VOLUME_TOTAL"] - .sum() - .reset_index() - ) - - # Remove duplicate data and calculate SURFACE total - data_unique = data_selected_region.drop_duplicates(subset=["LIEU_COORD_GPS"]) - surface_total_lieu = ( - data_unique.groupby("TYPE_LIEU2")["SURFACE"].sum().reset_index() - ) - - # Merge volume and surface data for 'LIEU', calculate density, and sort - data_lieu = pd.merge(volume_total_lieu, surface_total_lieu, on="TYPE_LIEU2") - data_lieu["DENSITE_LIEU"] = ( - data_lieu["VOLUME_TOTAL"] / data_lieu["SURFACE"] - ).round(5) - data_lieu_sorted = data_lieu.sort_values(by="DENSITE_LIEU", ascending=False) - - # Display sorted DataFrame with specific configuration for 'data_lieu_sorted' - lieu = st.markdown("##### Densité des déchets par type de lieu (L/m2)") - st.dataframe( - data_lieu_sorted, - column_order=("TYPE_LIEU2", "DENSITE_LIEU"), - hide_index=True, - width=None, - column_config={ - "TYPE_LIEU2": st.column_config.TextColumn( - "Lieu", - ), - "DENSITE_LIEU": st.column_config.ProgressColumn( - "Densité", - format="%f", - min_value=0, - max_value=max(data_lieu_sorted["DENSITE_LIEU"]), - ), - }, - ) - - return lieu - - -def density_milieu(data_zds: pd.DataFrame, 
multi_filter_dict: dict): - """ - Calculate and display the density of waste by type of location ('MILIEU') for a selected region. - """ - # Get the selected region from filter_dict - selected_regions = multi_filter_dict.get("REGION", []) - - if selected_regions is not None: - # Filter data for selected region - data_selected_region = data_zds[data_zds["LIEU_REGION"].isin(selected_regions)] - - # Calculate waste volume sum for each 'MILIEU' - volume_total_milieu = ( - data_selected_region.groupby("TYPE_MILIEU")["VOLUME_TOTAL"] - .sum() - .reset_index() - ) - - # Remove duplicate data and calculate SURFACE total - data_unique = data_selected_region.drop_duplicates(subset=["LIEU_COORD_GPS"]) - surface_total_milieu = ( - data_unique.groupby("TYPE_MILIEU")["SURFACE"].sum().reset_index() - ) - - # Merge volume and surface data for 'MILIEU', calculate density, and sort - data_milieu = pd.merge( - volume_total_milieu, surface_total_milieu, on="TYPE_MILIEU" - ) - data_milieu["DENSITE_MILIEU"] = ( - data_milieu["VOLUME_TOTAL"] / data_milieu["SURFACE"] - ).round(5) - data_milieu_sorted = data_milieu.sort_values( - by="DENSITE_MILIEU", ascending=False - ) - - # Display sorted DataFrame with specific configuration for 'data_milieu_sorted' - milieu = st.markdown("##### Densité des déchets par type de milieu (L/m2)") - st.dataframe( - data_milieu_sorted, - column_order=("TYPE_MILIEU", "DENSITE_MILIEU"), - hide_index=True, - width=None, - column_config={ - "TYPE_MILIEU": st.column_config.TextColumn( - "Milieu", - ), - "DENSITE_MILIEU": st.column_config.ProgressColumn( - "Densité", - format="%f", - min_value=0, - max_value=max(data_milieu_sorted["DENSITE_MILIEU"]), - ), - }, - ) - - return milieu - - -###################################################### -# 2.3/ Carte choropleth densité de déchets en France # -###################################################### - - -def make_density_choropleth(data_zds, region_geojson_path): - # Load all regions from the GeoJSON file - 
regions_geojson = requests.get(region_geojson_path).json() - - # Extract region names from GeoJSON for later comparison - regions_from_geojson = [ - feature["properties"]["nom"] for feature in regions_geojson["features"] - ] - - # Create a DataFrame from the GeoJSON region names - regions_df = pd.DataFrame(regions_from_geojson, columns=["nom"]) - - # Data preparation - # Calculate the total VOLUME_TOTAL for each region without removing duplicate data - volume_total_sums = ( - data_zds.groupby("LIEU_REGION")["VOLUME_TOTAL"].sum().reset_index() - ) - - # Merge the waste data and the geographical data - volume_total_sums = pd.merge( - regions_df, volume_total_sums, left_on="nom", right_on="LIEU_REGION", how="left" - ) - - # Identify regions with no available data - regions_no_data = volume_total_sums[volume_total_sums["VOLUME_TOTAL"].isna()][ - "nom" - ].tolist() - if regions_no_data: - st.info( - f"Aucune donnée disponible pour les régions suivantes : {', '.join(regions_no_data)}", - icon="⚠️", - ) - - # Drop rows containing NaN to avoid errors in the choropleth - volume_total_sums.dropna(inplace=True) - - # Remove duplicate data and calculate SURFACE total - data_unique = data_zds.drop_duplicates(subset=["LIEU_COORD_GPS"]) - surface_total_sums = ( - data_unique.groupby("LIEU_REGION")["SURFACE"].sum().reset_index() - ) - - # Combine two datasets and calculate DENSITE - data_choropleth_sums = pd.merge( - volume_total_sums, surface_total_sums, on="LIEU_REGION" - ) - data_choropleth_sums["DENSITE"] = ( - data_choropleth_sums["VOLUME_TOTAL"] / data_choropleth_sums["SURFACE"] - ) - - # Set bins for the choropleth - min_density = data_choropleth_sums["DENSITE"].min() - max_density = data_choropleth_sums["DENSITE"].max() - - # Create the choropleth map using Plotly Express - choropleth = px.choropleth( - data_choropleth_sums, - geojson=regions_geojson, - featureidkey="properties.nom", - locations="LIEU_REGION", - color="DENSITE", - color_continuous_scale="Reds", - 
range_color=(min_density, max_density), # set range using log scale - labels={"DENSITE": "Densité de Déchets(L/m2)"}, - ) - - # Update layout to fit the map to the boundaries of the GeoJSON - choropleth.update_layout( - geo=dict(fitbounds="locations", visible=False), margin=dict(l=0, r=0, t=0, b=0) - ) - - return choropleth - - -############################################################ -# 2.1/ Line chart de l'évolution de la densité des déchets # -# par lieu et par milieu au fil des années spots adoptés # -############################################################ +def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicator_col3): + # Calculate density + data['DENSITE'] = data['VOLUME_TOTAL'] / data['SURFACE'] + data = data[data['DENSITE'] < 20] # Remove rows with anomalously high density values -def line_chart_lieu(data_zds: pd.DataFrame, multi_filter_dict: dict): - # Get the selected region and milieu from the filter dictionary - selected_regions = multi_filter_dict.get("REGION", []) - selected_lieu = multi_filter_dict.get("TYPE_LIEU2", []) - # Ensure that at least one region is selected - if not selected_regions: - st.error("Aucune région sélectionnée. 
Veuillez préciser une région.") - return - # Filter data for the selected region - data_selected_region = data_zds[data_zds["LIEU_REGION"].isin(selected_regions)] - if data_selected_region.empty: - st.warning( - f"Aucune donnée disponible pour la région sélectionnée : {selected_regions}" - ) - return + # Display metrics in specified UI containers + cell1 = indicator_col1.container(border=True) + cell1.metric("Densité Moyenne :", f"{data['DENSITE'].mean().round(4)} L/m²") - # Further filter data for the selected milieus - data_selected_lieu = ( - data_selected_region[data_selected_region["TYPE_LIEU2"].isin(selected_lieu)] - if selected_lieu - else data_selected_region - ) + cell2 = indicator_col2.container(border=True) + cell2.metric("Volume Moyen :", f"{data['VOLUME_TOTAL'].mean().round(2)} Litres") - # Check if there is any data left after filtering by milieu - if data_selected_lieu.empty: - st.warning("Aucune donnée disponible pour le lieu sélectionné.") - return - - # Calculate waste volume sum for each 'LIEU' by 'ANNEE' - volume_total_annee = ( - data_selected_lieu.groupby(["TYPE_LIEU2", "ANNEE"])["VOLUME_TOTAL"] - .sum() - .reset_index() - ) + cell3 = indicator_col3.container(border=True) + cell3.metric("Surface Moyenne :", f"{data['SURFACE'].mean().round(2):,} m²") - # Remove duplicate data and calculate SURFACE total - data_unique = data_selected_lieu.drop_duplicates(subset=["LIEU_COORD_GPS"]) - surface_total_annee = ( - data_unique.groupby(["TYPE_LIEU2", "ANNEE"])["SURFACE"].sum().reset_index() - ) + return data - # Merge volume and surface data for 'MILIEU', calculate density, and sort - data_lieu = pd.merge( - volume_total_annee, surface_total_annee, on=["TYPE_LIEU2", "ANNEE"] - ) - if data_lieu.empty: - st.warning( - "Aucune donnée superposée pour les calculs de volume et de surface pour les lieux sélectionnés." 
- ) - return +# Define the colors representing les différents 'Lieux' et 'Milieux' +couleur = { + 'Littoral (terrestre)': 'lightblue', + 'Mer - Océan': 'darkblue', + 'Cours d\'eau': 'cyan', + 'Zone naturelle ou rurale (hors littoral et montagne)': 'green', + 'Zone urbaine': 'orange', + 'Lagune et étang côtier': 'red', + 'Multi-lieux': 'pink', + 'Montagne': 'grey', + 'Présent au sol (abandonné)': 'black'} - data_lieu["DENSITE_LIEU"] = ( - data_lieu["VOLUME_TOTAL"] / data_lieu["SURFACE"] - ).round(5) - data_lieu_sorted = data_lieu.sort_values(by="ANNEE", ascending=False) - - # Create the plot - fig = go.Figure() - for type_lieu in data_lieu_sorted["TYPE_LIEU2"].unique(): - df_plot = data_lieu_sorted[data_lieu_sorted["TYPE_LIEU2"] == type_lieu] - fig.add_trace( - go.Scatter( - x=df_plot["ANNEE"], - y=df_plot["DENSITE_LIEU"], - mode="lines+markers", - name=type_lieu, - ) - ) +# Function to retrieve the color associated with a given environment type +def couleur_milieu(type): + return couleur.get(type, 'white') # Returns 'white' if the type is not found - # Update plot layout - fig.update_layout( - title=f"Densité des déchets par type de lieu", - xaxis_title="Année", - yaxis_title="Densité L/m2", - legend_title="Type de lieu", - ) - - st.plotly_chart(fig) - - -def line_chart_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): - # Get the selected region and milieu from the filter dictionary - selected_regions = multi_filter_dict.get("REGION", []) - selected_milieu = multi_filter_dict.get("TYPE_MILIEU", []) - - # Ensure that at least one region is selected - if not selected_regions: - st.error("Aucune région sélectionnée. 
Veuillez préciser une région.") - return - - # Filter data for the selected region - data_selected_region = data_zds[data_zds["LIEU_REGION"].isin(selected_regions)] - if data_selected_region.empty: - st.warning( - f"Aucune donnée disponible pour la région sélectionnée : {selected_regions}" - ) - return - - # Further filter data for the selected milieus - data_selected_milieu = ( - data_selected_region[data_selected_region["TYPE_MILIEU"].isin(selected_milieu)] - if selected_milieu - else data_selected_region - ) - - # Check if there is any data left after filtering by milieu - if data_selected_milieu.empty: - st.warning("Aucune donnée disponible pour le milieu sélectionné.") - return +# Function to plot a density map +def plot_density_map( + data_zds: pd.DataFrame, + region_geojson_path: str, +) -> folium.Map: - # Calculate waste volume sum for each 'MILIEU' by 'ANNEE' - volume_total_annee = ( - data_selected_milieu.groupby(["TYPE_MILIEU", "ANNEE"])["VOLUME_TOTAL"] - .sum() + # Read geographic data from a GeoJSON file + gdf = gpd.read_file(region_geojson_path) + + # Calculate density + data_zds['DENSITE'] = data_zds['VOLUME_TOTAL']/data_zds['SURFACE'] + data_zds = data_zds[data_zds['DENSITE'] < 20] # Remove rows with anomalously high density values + + # Round density values for display + data_zds['DENSITE'] = data_zds['DENSITE'].round(4) + # Round surface values for display + data_zds['SURFACE_ROND'] = data_zds['SURFACE'].round(2) + + # Initialize a map centered at the mean coordinates of locations + m = folium.Map(location=[data_zds['LIEU_COORD_GPS_Y'].mean(), data_zds['LIEU_COORD_GPS_X'].mean()]) + + # Loop over each row in the DataFrame to place markers + for index, row in data_zds.iterrows(): + popup_html = f""" +
+

Densité: {row['DENSITE']} L/m²

+

Volume total : {row['VOLUME_TOTAL']} litres

+

Surface total : {row['SURFACE_ROND']} m²

+

Type de milieu : {row['TYPE_MILIEU']}

+

Type de lieu : {row['TYPE_LIEU']}

+
+ """ + lgd_txt = '{txt}' + color = couleur_milieu(row['TYPE_MILIEU']) + folium.CircleMarker( + fg = folium.FeatureGroup(name= lgd_txt.format( txt= ['TYPE_MILIEU'], col= color)), + location=[row['LIEU_COORD_GPS_Y'], row['LIEU_COORD_GPS_X']], + radius=np.log(row['DENSITE'] + 1)*15, + popup=folium.Popup(popup_html, max_width=300), + color=color, + fill=True, + + ).add_to(m) + + folium_static(m) + +# Function for 'milieu' density table + +def density_table(data_zds: pd.DataFrame): + + # Calculate density + data_zds['DENSITE'] = data_zds['VOLUME_TOTAL'] / data_zds['SURFACE'] + # Remove rows with anomalously high density values + data_zds = data_zds[data_zds['DENSITE'] < 20] + + # Group by 'TYPE_MILIEU', calculate mean density, sort, and round the density + table_milieu = ( + data_zds.groupby('TYPE_MILIEU')['DENSITE'] + .mean() .reset_index() + .sort_values(by='DENSITE', ascending=False) ) - - # Remove duplicate data and calculate SURFACE total - data_unique = data_selected_milieu.drop_duplicates(subset=["LIEU_COORD_GPS"]) - surface_total_annee = ( - data_unique.groupby(["TYPE_MILIEU", "ANNEE"])["SURFACE"].sum().reset_index() - ) - - # Merge volume and surface data for 'MILIEU', calculate density, and sort - data_milieu = pd.merge( - volume_total_annee, surface_total_annee, on=["TYPE_MILIEU", "ANNEE"] - ) - if data_milieu.empty: - st.warning( - "Aucune donnée superposée pour les calculs de volume et de surface pour les milieux sélectionnés." 
- ) - return - - data_milieu["DENSITE_MILIEU"] = ( - data_milieu["VOLUME_TOTAL"] / data_milieu["SURFACE"] - ).round(5) - data_milieu_sorted = data_milieu.sort_values(by="ANNEE", ascending=False) - - # Create the plot - fig = go.Figure() - for type_milieu in data_milieu_sorted["TYPE_MILIEU"].unique(): - df_plot = data_milieu_sorted[data_milieu_sorted["TYPE_MILIEU"] == type_milieu] - fig.add_trace( - go.Scatter( - x=df_plot["ANNEE"], - y=df_plot["DENSITE_MILIEU"], - mode="lines+markers", - name=type_milieu, - ) - ) - - # Update plot layout - fig.update_layout( - title=f"Densité des déchets par type de milieu", - xaxis_title="Année", - yaxis_title="Densité L/m2", - legend_title="Type de milieu", - ) - - st.plotly_chart(fig) + table_milieu['DENSITE'] = table_milieu['DENSITE'].round(4) + + st.dataframe(table_milieu, + column_order=("TYPE_MILIEU", "DENSITE"), + hide_index=True, + width=800, + column_config={ + "TYPE_MILIEU": st.column_config.TextColumn( + "Milieu", + ), + "DENSITE": st.column_config.NumberColumn( + "Densité (L/m²)", + format="%f", + min_value=0, + max_value=max(table_milieu['DENSITE']), + )} + ) ################################ @@ -711,10 +462,6 @@ def plot_adopted_waste_spots( crs="EPSG:4326", ) - # Convert ANNEE values to integers - if "ANNEE" in single_filter_dict: - single_filter_dict["ANNEE"] = int(single_filter_dict["ANNEE"]) - # Construct the query string query_string = construct_query_string(**single_filter_dict) @@ -785,69 +532,43 @@ def plot_adopted_waste_spots( # Dashboard Main Panel # ######################## -tab1, tab2, tab3, tab4 = st.tabs( +tab1, tab2 = st.tabs( [ "Densité des déchets dans zone étudié", - "Évolution de la densité au fil du temps", - "Spots Adoptés", - "Aperçu à travers la France", + "Spots Adoptés" ] ) with tab1: - # Select only the filters for 'REGION' and 'ANNEE' - selected_filters_1 = [ - f for f in DENSITY_FILTERS_PARAMS if f["filter_col"] in ["REGION", "ANNEE"] - ] + # Define placeholder widgets for displaying the 
information + indicator_col1 = st.container() + indicator_col2 = st.container() + indicator_col3 = st.container() - # Use the selected filters for multi-select - multi_filter_dict_1 = scalable_filters_multi_select( - data_zds, selected_filters_1, tab1 - ) + # Create side-by-side containers for indicators + indicator_col1, indicator_col2, indicator_col3 = st.columns(3) - col = st.columns((4, 4, 2), gap="medium") + # Call the function with the data and UI elements + calculate_and_display_metrics(data_zds, indicator_col1, indicator_col2, indicator_col3) - # Construct the map - with col[0]: - density_lieu(data_zds, multi_filter_dict_1) + st.markdown("---") - with col[1]: - density_milieu(data_zds, multi_filter_dict_1) + left_column, right_column = st.columns([2, 1]) - with col[2]: - with st.expander("Notice ℹ️", expanded=True): - st.write( - """ - Explication des diffférences entre Lieu et Milieu - """ - ) + with left_column: + st.markdown("### Carte des Densités") + plot_density_map(data_zds, REGION_GEOJSON_PATH) -with tab2: - # Select only the filters for 'REGION' and 'ANNEE' - selected_filters_2 = [ - f - for f in DENSITY_FILTERS_PARAMS - if f["filter_col"] in ["REGION", "TYPE_LIEU2", "TYPE_MILIEU"] - ] + with right_column: + st.markdown("### Tableau des Densités par Milieu") + density_table(data_zds) - # Use the selected filters for multi-select - multi_filter_dict_2 = scalable_filters_multi_select( - data_zds, selected_filters_2, tab2 - ) - - col = st.columns((7, 7), gap="medium") - - with col[0]: - line_chart_lieu(data_zds, multi_filter_dict_2) - with col[1]: - line_chart_milieu(data_zds, multi_filter_dict_2) - -with tab3: +with tab2: # Use the selected filters single_filter_dict_3 = scalable_filters_single_select( - data_zds, ADOPTED_SPOTS_FILTERS_PARAMS, tab3 + data_zds, ADOPTED_SPOTS_FILTERS_PARAMS, tab2 ) st.markdown("### Spots Adoptés") @@ -855,8 +576,3 @@ def plot_adopted_waste_spots( # Show the adopted spots map on the streamlit tab if m: folium_static(m) 
- -with tab4: - st.markdown("### Densité des déchets en France") - choropleth = make_density_choropleth(data_zds, REGION_GEOJSON_PATH) - st.plotly_chart(choropleth, use_container_width=True) From 733034891790dda9057e669e0acc8a4ea4cc43fd Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 11:33:07 +0200 Subject: [PATCH 069/147] correction message erreur si pas de selection de territoire (Home>Acceuil) et bouton de selection rendu non clickable si pas de selection pour eviter bug --- dashboards/app/home.py | 6 +++--- dashboards/app/pages/data.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 00c5d07..1b0ee9f 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -49,10 +49,10 @@ def load_css(file_name: str) -> None: show_pages( [ Page("home.py", "Accueil", "🏠"), + Page("pages/structures.py", "Structures", "🔭"), Page("pages/actions.py", "Actions", "👊"), Page("pages/data.py", "Data", "🔍"), Page("pages/hotspots.py", "Hotspots", "🔥"), - Page("pages/structures.py", "Structures", "🔭"), ], ) @@ -134,8 +134,8 @@ def load_df_nb_dechet() -> pd.DataFrame: liste_collectivites, index=index_collec, ) - - if st.button("Enregistrer la sélection"): + button_disabled = not select_niveauadmin or not select_collectivite + if st.button("Enregistrer la sélection", disabled=button_disabled): # Enregistrer les valeurs sélectionnées dans le session.state st.session_state["niveau_admin"] = select_niveauadmin st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 6117c47..97ba5f3 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -47,7 +47,7 @@ def load_df_dict_corr_dechet_materiau(): st.write( """ ### :warning: Merci de sélectionner une collectivité\ - dans l'onglet Home pour afficher les données. :warning: + dans l'onglet Accueil pour afficher les données. 
:warning: """ ) st.stop() From eccef413042be2711e19065aad3ceb6908d426ef Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 12:30:22 +0200 Subject: [PATCH 070/147] =?UTF-8?q?reglage=20du=20zoom=20onglet=20top=20de?= =?UTF-8?q?chet=20en=20fonction=20du=20niveau=20de=20collectivit=C3=A9=20e?= =?UTF-8?q?t=20modification=20de=20la=20page=20d'acceuil=20pour=20n'affich?= =?UTF-8?q?er=20le=20menu=20qu'apres=20selection=20de=20collectivit=C3=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 13 +++++++++---- dashboards/app/pages/data.py | 15 +++++++++++++-- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 1b0ee9f..c7f84e6 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -49,10 +49,6 @@ def load_css(file_name: str) -> None: show_pages( [ Page("home.py", "Accueil", "🏠"), - Page("pages/structures.py", "Structures", "🔭"), - Page("pages/actions.py", "Actions", "👊"), - Page("pages/data.py", "Data", "🔍"), - Page("pages/hotspots.py", "Hotspots", "🔥"), ], ) @@ -149,6 +145,15 @@ def load_df_nb_dechet() -> pd.DataFrame: # Afficher la collectivité sélectionnée st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/structures.py", "Structures", "🔭"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", "Hotspots", "🔥"), + ], + ) # Filtrer et enregistrer le DataFrame dans un session state pour la suite colonne_filtre = niveaux_admin_dict[select_niveauadmin] diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 97ba5f3..89d0df2 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -15,6 +15,7 @@ # Récupérer les filtres géographiques s'ils ont été fixés filtre_niveau = st.session_state.get("niveau_admin", "") 
filtre_collectivite = st.session_state.get("collectivite", "") +# zoom_niveau = st.session_state.get("niveau_admin", "Région") # Titre de l'onglet st.markdown( @@ -642,6 +643,16 @@ def load_df_dict_corr_dechet_materiau(): ) # Création de la carte centrée autour d'une localisation + # Initialisation du zoom sur la carte + if filtre_niveau == "Commune": + zoom_admin = 12 + elif filtre_niveau == "EPCI": + zoom_admin = 13 + elif filtre_niveau == "Département": + zoom_admin = 11 + else: + zoom_admin = 8 + # Calcul des limites à partir de vos données min_lat = df_map_data["LIEU_COORD_GPS_Y"].min() max_lat = df_map_data["LIEU_COORD_GPS_Y"].max() @@ -650,7 +661,8 @@ def load_df_dict_corr_dechet_materiau(): map_data = folium.Map( location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], - zoom_start=8, + zoom_start=zoom_admin, + # zoom_start=8, tiles="OpenStreetMap", ) @@ -679,7 +691,6 @@ def load_df_dict_corr_dechet_materiau(): folium.Figure().add_child(map_data).render(), # , width=1400 height=750, ) - # Onglet 3 : Secteurs et marques with tab3: st.write("") From fe5c8c12ea9d77af031379b354c88a08787f3c83 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 13:22:36 +0200 Subject: [PATCH 071/147] reglage du zoom carte et changement onglet home --- dashboards/app/home.py | 17 +++++++++++------ dashboards/app/pages/actions.py | 13 ++++++++++++- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 00c5d07..c7f84e6 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -49,10 +49,6 @@ def load_css(file_name: str) -> None: show_pages( [ Page("home.py", "Accueil", "🏠"), - Page("pages/actions.py", "Actions", "👊"), - Page("pages/data.py", "Data", "🔍"), - Page("pages/hotspots.py", "Hotspots", "🔥"), - Page("pages/structures.py", "Structures", "🔭"), ], ) @@ -134,8 +130,8 @@ def load_df_nb_dechet() -> pd.DataFrame: liste_collectivites, index=index_collec, ) - - if st.button("Enregistrer la 
sélection"): + button_disabled = not select_niveauadmin or not select_collectivite + if st.button("Enregistrer la sélection", disabled=button_disabled): # Enregistrer les valeurs sélectionnées dans le session.state st.session_state["niveau_admin"] = select_niveauadmin st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( @@ -149,6 +145,15 @@ def load_df_nb_dechet() -> pd.DataFrame: # Afficher la collectivité sélectionnée st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/structures.py", "Structures", "🔭"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", "Hotspots", "🔥"), + ], + ) # Filtrer et enregistrer le DataFrame dans un session state pour la suite colonne_filtre = niveaux_admin_dict[select_niveauadmin] diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index c38c02d..3940c61 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -168,6 +168,16 @@ def load_df_other(): cell3.metric("Nombre de structures", f"{nombre_structures}") # Ligne 2 : Carte + # Initialisation du zoom sur la carte + if filtre_niveau == "Commune": + zoom_admin = 12 + elif filtre_niveau == "EPCI": + zoom_admin = 13 + elif filtre_niveau == "Département": + zoom_admin = 10 + else: + zoom_admin = 8 + with st.container(): # Création du DataFrame de travail pour la carte df_map_evnenements = df_other_filtre.copy() @@ -180,7 +190,8 @@ def load_df_other(): map_evenements = folium.Map( location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], - zoom_start=8, + zoom_start=zoom_admin, + # zoom_start=8, tiles="OpenStreetMap", ) # Facteur de normalisation pour ajuster la taille des bulles From 2f346f57e86288680ad9c38a665a07d19f212840 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 13:30:03 +0200 Subject: [PATCH 072/147] reglage du zoom departement --- 
dashboards/app/pages/data.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 89d0df2..78235b3 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -15,7 +15,6 @@ # Récupérer les filtres géographiques s'ils ont été fixés filtre_niveau = st.session_state.get("niveau_admin", "") filtre_collectivite = st.session_state.get("collectivite", "") -# zoom_niveau = st.session_state.get("niveau_admin", "Région") # Titre de l'onglet st.markdown( @@ -649,7 +648,7 @@ def load_df_dict_corr_dechet_materiau(): elif filtre_niveau == "EPCI": zoom_admin = 13 elif filtre_niveau == "Département": - zoom_admin = 11 + zoom_admin = 10 else: zoom_admin = 8 From 478cc57e9afa757725936bcc1d964190a3c45b8f Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 14:40:07 +0200 Subject: [PATCH 073/147] =?UTF-8?q?ajout=20du=20graph=20responsabilit?= =?UTF-8?q?=C3=A9s=20dans=20top=20dechets?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 71 ++++++++++++++++++++++++++++++++---- 1 file changed, 64 insertions(+), 7 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 78235b3..65250e3 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -823,14 +823,23 @@ def load_df_dict_corr_dechet_materiau(): top_marque_df = top_marque_df.reset_index() top_marque_df.columns = ["Marque", "Nombre de déchets"] + # Data pour le plot responsabilitéz + rep_df = df_init[df_init["type_regroupement"].isin(["REP"])] + top_rep_df = ( + rep_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) + ) + top_rep_df = top_rep_df.reset_index() + top_rep_df.columns = ["Responsabilité élargie producteur", "Nombre de déchets"] + # Chiffres clés nb_dechet_secteur = secteur_df["nb_dechet"].sum() nb_secteurs = len(top_secteur_df["Secteur"].unique()) - 
nb_dechet_marque = marque_df["nb_dechet"].sum() nb_marques = len(top_marque_df["Marque"].unique()) collectes = len(df_filtered) - + nb_dechet_rep = rep_df["nb_dechet"].sum() + nb_rep = len(top_rep_df["Responsabilité élargie producteur"].unique()) + # Metriques et graphs secteurs # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -929,19 +938,20 @@ def load_df_dict_corr_dechet_materiau(): with st.container(border=True): st.plotly_chart(fig_secteur, use_container_width=True) - l1_col1, l1_col2 = st.columns(2) - cell1 = l1_col1.container(border=True) + # Metriques et graphes marques + l2_col1, l2_col2 = st.columns(2) + cell4 = l2_col1.container(border=True) # Trick pour séparer les milliers nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") - cell1.metric( + cell4.metric( "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" ) # 2ème métrique : poids - cell2 = l1_col2.container(border=True) + cell5 = l2_col2.container(border=True) nb_marques = f"{nb_marques:,.0f}".replace(",", " ") - cell2.metric( + cell5.metric( "Nombre de marques identifiés lors des collectes", f"{nb_marques} marques", ) @@ -972,5 +982,52 @@ def load_df_dict_corr_dechet_materiau(): with st.container(border=True): st.plotly_chart(fig_marque, use_container_width=True) + + # Metriques et graphes Responsabilité elargie producteurs + l3_col1, l3_col2 = st.columns(2) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell6 = l3_col1.container(border=True) + # Trick pour séparer les milliers + nb_dechet_rep = f"{nb_dechet_rep:,.0f}".replace(",", " ") + cell6.metric( + "Nombre de déchets catégorisés par responsabilités", + f"{nb_dechet_rep} dechets", + ) + + # 2ème métrique : poids + cell7 = l3_col2.container(border=True) + nb_rep = f"{nb_rep:,.0f}".replace(",", " ") + cell7.metric( + 
"Nombre de responsabilités identifiés lors des collectes", + f"{nb_rep} Responsabilités", + ) + + fig_rep = px.bar( + top_rep_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), + x="Nombre de déchets", + y="Responsabilité élargie producteur", + title="Top 10 des Responsabilités élargies producteurs relatives aux dechets les plus ramassés", + color_discrete_sequence=["#1951A0"], + orientation="h", + text_auto=False, + text=top_rep_df.tail(10)["Responsabilité élargie producteur"] + + ": " + + top_rep_df.tail(10)["Nombre de déchets"].astype(str), + ) + # add log scale to x axis + fig_rep.update_layout(xaxis_type="log") + # fig_rep.update_traces(texttemplate="%{value:.0f}", textposition="inside") + + fig_rep.update_layout( + width=800, + height=500, + uniformtext_minsize=8, + uniformtext_mode="hide", + yaxis_title=None, + ) + + with st.container(border=True): + st.plotly_chart(fig_rep, use_container_width=True) else: st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") From c90cd01945d1f4a466fca895283a876dd40926eb Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 14:43:54 +0200 Subject: [PATCH 074/147] modifications mineures annotation code --- dashboards/app/pages/data.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 65250e3..8d5b20f 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -942,13 +942,14 @@ def load_df_dict_corr_dechet_materiau(): l2_col1, l2_col2 = st.columns(2) cell4 = l2_col1.container(border=True) + # 1er métrique : nombre de dechets categorises par marques # Trick pour séparer les milliers nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") cell4.metric( "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" ) - # 2ème métrique : poids + # 2ème métrique : nombre de marques identifiées lors des collectes cell5 = l2_col2.container(border=True) nb_marques = 
f"{nb_marques:,.0f}".replace(",", " ") cell5.metric( @@ -986,7 +987,7 @@ def load_df_dict_corr_dechet_materiau(): # Metriques et graphes Responsabilité elargie producteurs l3_col1, l3_col2 = st.columns(2) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés + # 1ère métrique : nombre de dechets catégorisés repartis par responsabilités cell6 = l3_col1.container(border=True) # Trick pour séparer les milliers nb_dechet_rep = f"{nb_dechet_rep:,.0f}".replace(",", " ") @@ -995,7 +996,7 @@ def load_df_dict_corr_dechet_materiau(): f"{nb_dechet_rep} dechets", ) - # 2ème métrique : poids + # 2ème métrique : nombre de responsabilités cell7 = l3_col2.container(border=True) nb_rep = f"{nb_rep:,.0f}".replace(",", " ") cell7.metric( From c29137ab77a0d90b04b8801d28c6c10e6d4c168e Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 15:08:19 +0200 Subject: [PATCH 075/147] =?UTF-8?q?suppression=20dechets=20de=20secteurs?= =?UTF-8?q?=20vide=20et=20indetermin=C3=A9s=20dans=20le=20top=2010?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 8d5b20f..0da4279 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -63,7 +63,7 @@ def load_df_dict_corr_dechet_materiau(): [ "Matériaux :wood:", "Top Déchets :wastebasket:", - "Secteurs et marques :womans_clothes:", + "Secteurs, marques et responsabilités élargies producteurs :womans_clothes:", ] ) @@ -839,9 +839,24 @@ def load_df_dict_corr_dechet_materiau(): collectes = len(df_filtered) nb_dechet_rep = rep_df["nb_dechet"].sum() nb_rep = len(top_rep_df["Responsabilité élargie producteur"].unique()) + # Metriques et graphs secteurs - # Ligne 1 : 3 
cellules avec les indicateurs clés en haut de page + # Retrait des categoriés "VIDE" et "INDERTERMINE" si présentes et recupération des valeurs + nb_vide_indetermine = 0 + if "VIDE" in top_secteur_df["Secteur"].unique(): + df_vide_indetermine = top_secteur_df[top_secteur_df["Secteur"] == "VIDE"] + nb_vide_indetermine = df_vide_indetermine["Nombre de déchets"].sum() + top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "VIDE"] + elif "INDERTERMINE" in top_secteur_df["Secteur"].unique(): + df_vide_indetermine = top_secteur_df[ + top_secteur_df["Secteur"] == "INDERTERMINE" + ] + nb_vide_indetermine += df_vide_indetermine["Nombre de déchets"].sum() + top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "INDERTERMINE"] + else: + pass + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) # 1ère métrique : volume total de déchets collectés @@ -938,6 +953,13 @@ def load_df_dict_corr_dechet_materiau(): with st.container(border=True): st.plotly_chart(fig_secteur, use_container_width=True) + if nb_vide_indetermine != 0: + st.warning( + "⚠️ Il y a " + + str(nb_vide_indetermine) + + " dechets dont le secteur n'a pas été determiné dans le top 10 des secteurs" + ) + # Metriques et graphes marques l2_col1, l2_col2 = st.columns(2) cell4 = l2_col1.container(border=True) From 13f5a7055a4ab1d848c21d9f21c42b3ef2b5f840 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 15:26:02 +0200 Subject: [PATCH 076/147] suppression labels abcisses barplots top 10 --- dashboards/app/pages/data.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 0da4279..96749cb 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -957,7 +957,7 @@ def load_df_dict_corr_dechet_materiau(): 
st.warning( "⚠️ Il y a " + str(nb_vide_indetermine) - + " dechets dont le secteur n'a pas été determiné dans le top 10 des secteurs" + + " dechets dont le secteur n'a pas été determiné dans la totalité des dechets du top 10 des secteurs" ) # Metriques et graphes marques @@ -993,6 +993,7 @@ def load_df_dict_corr_dechet_materiau(): ) # add log scale to x axis fig_marque.update_layout(xaxis_type="log") + fig_marque.update_yaxes(showticklabels=False) # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_marque.update_layout( @@ -1040,6 +1041,8 @@ def load_df_dict_corr_dechet_materiau(): ) # add log scale to x axis fig_rep.update_layout(xaxis_type="log") + # Masquer les labels de l'axe des ordonnées + fig_rep.update_yaxes(showticklabels=False) # fig_rep.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_rep.update_layout( From eeac0dd610a701dae96c5ff6dbe5cc34deb5f6bb Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 15:45:06 +0200 Subject: [PATCH 077/147] formattage PEP8 data.py --- dashboards/app/pages/data.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 96749cb..05e20f6 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -46,9 +46,9 @@ def load_df_dict_corr_dechet_materiau(): ): st.write( """ - ### :warning: Merci de sélectionner une collectivité\ - dans l'onglet Accueil pour afficher les données. :warning: - """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Accueil pour afficher les données. :warning: + """ ) st.stop() else: @@ -624,8 +624,8 @@ def load_df_dict_corr_dechet_materiau(): st.write("") st.caption( f"Note : Analyse basée sur les collectes qui ont fait l'objet d'un comptage détaillé par déchet,\ - soit {volume_total_categorise} litres équivalent à {pct_volume_categorise:.0%} du volume collecté\ - sur le territoire." 
+ soit {volume_total_categorise} litres équivalent à {pct_volume_categorise:.0%} du volume collecté\ + sur le territoire." ) with st.container(): # Ajout de la selectbox @@ -661,7 +661,7 @@ def load_df_dict_corr_dechet_materiau(): map_data = folium.Map( location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], zoom_start=zoom_admin, - # zoom_start=8, + # zoom_start=8, tiles="OpenStreetMap", ) @@ -823,7 +823,7 @@ def load_df_dict_corr_dechet_materiau(): top_marque_df = top_marque_df.reset_index() top_marque_df.columns = ["Marque", "Nombre de déchets"] - # Data pour le plot responsabilitéz + # Data pour le plot responsabilités rep_df = df_init[df_init["type_regroupement"].isin(["REP"])] top_rep_df = ( rep_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) @@ -994,7 +994,7 @@ def load_df_dict_corr_dechet_materiau(): # add log scale to x axis fig_marque.update_layout(xaxis_type="log") fig_marque.update_yaxes(showticklabels=False) - # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") + # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_marque.update_layout( width=800, @@ -1043,7 +1043,7 @@ def load_df_dict_corr_dechet_materiau(): fig_rep.update_layout(xaxis_type="log") # Masquer les labels de l'axe des ordonnées fig_rep.update_yaxes(showticklabels=False) - # fig_rep.update_traces(texttemplate="%{value:.0f}", textposition="inside") + # fig_rep.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_rep.update_layout( width=800, From bbd91b1692038bb4b8fa90264ccffcc76802d5c0 Mon Sep 17 00:00:00 2001 From: DridrM Date: Tue, 23 Apr 2024 16:12:13 +0200 Subject: [PATCH 078/147] construct_admin_lvl_boundaries function finished --- dashboards/app/pages/hotspots.py | 93 ++++++++++++++++++++++---------- 1 file changed, 66 insertions(+), 27 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 7908627..e5bcd84 100644 --- 
a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -33,6 +33,17 @@ "Commune": "COMMUNE_CODE_NOM", } +# This is a copy of the previous dict, with just the "EPCI" value modified with the +# name of the "COMMUNE_CODE_NOM" column in the data_zds df, in order to trigger the display +# of the "EPCI" level boundaries without an "EPCI" geojson map, knowing that one EPCI is +# made of one or multiple "communes" +NIVEAUX_ADMIN_DICT_ALTERED = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "COMMUNE_CODE_NOM", + "Commune": "COMMUNE_CODE_NOM", +} + # The name of the "niveau_admin" fetch from the session state NIVEAU_ADMIN = st.session_state["niveau_admin"] @@ -56,13 +67,20 @@ "sation/data/data_zds_enriched.csv" ) -# Data path for the France regions geojson -REGION_GEOJSON_PATH = ( +# Root data path for the France administrative levels geojson +NIVEAUX_ADMIN_GEOJSON_ROOT_PATH = ( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/regions" - "-avec-outre-mer.geojson" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/" ) +# Dict containing the path of the administrative levels geojson referenced by the names of these adminitrative levels +NIVEAUX_ADMIN_GEOJSON_PATH_DICT = { + "Région": f"{NIVEAUX_ADMIN_GEOJSON_ROOT_PATH}regions-avec-outre-mer.geojson", + "Département": f"{NIVEAUX_ADMIN_GEOJSON_ROOT_PATH}departements-avec-outre-mer.geojson", + "EPCI": f"{NIVEAUX_ADMIN_GEOJSON_ROOT_PATH}communes-avec-outre-mer.geojson", + "Commune": f"{NIVEAUX_ADMIN_GEOJSON_ROOT_PATH}communes-avec-outre-mer.geojson", +} + # Data path for Correction CORRECTION = ( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/" @@ -255,28 +273,55 @@ def scalable_filters_multi_select( return filter_dict +def construct_admin_lvl_filter_list( + data_zds: pd.DataFrame, + admin_lvl: str, + admin_lvl_dict_altered=NIVEAUX_ADMIN_DICT_ALTERED, +) -> list: + """Create a list of names 
for a given admin level. This function was created + in order to trigger the display of the 'EPCI' level boundaries without an + 'EPCI' geojson map, knowing that one EPCI is made of one or multiple 'communes' + Arguments: + - data_zds: the dataframe containing waste data and administrative levels columns + - admin_lvl: the common name of the target administrative level + Params: + - admin_lvl_dict_altered: a dict mapping admin levels common names and the names + of the columns corresponding in the data_zds df""" + + # Unpack the column name of the admin level + admin_lvl_col_name = admin_lvl_dict_altered[f"{admin_lvl}"] + + # Return the list of uniques administrative names corresponding to the selection made in the home tab + return data_zds[f"{admin_lvl_col_name}"].str.lower().unique().to_list() + + def construct_admin_lvl_boundaries( - admin_lvl: str, single_filter_dict: dict, admin_lvl_geojson_path_dict: dict + data_zds: pd.DataFrame, admin_lvl: str, admin_lvl_geojson_path_dict: dict ) -> any: - """""" + """Return a filtered geodataframe with shapes of a target administrative level. 
+ Arguments: + - data_zds: the dataframe containing waste data and administrative levels columns + - admin_lvl: the common name of the target administrative level + - admin_lvl_geojson_path_dict: a dict mapping administrative levels common + names and the paths of the geojson administrative levels shapes""" # Unpack the admin level geojson path admin_lvl_geojson_path = admin_lvl_geojson_path_dict[f"{admin_lvl}"] # Unpack the region name - admin_lvl_name = single_filter_dict[f"{admin_lvl}"] + admin_lvl_names = construct_admin_lvl_filter_list(data_zds, admin_lvl) # Load France regions from a GeoJSON file admin_lvl_shapes = gpd.read_file(admin_lvl_geojson_path) # Filter the region geodataframe for the specified region - selected_admin_lvl = admin_lvl_shapes[ - admin_lvl_shapes["nom"].str.lower() == admin_lvl_name.lower() + selected_admin_lvl_shapes = admin_lvl_shapes[ + admin_lvl_shapes["nom"].str.lower().isin(admin_lvl_names) ] - if selected_admin_lvl.empty: - raise KeyError(f"Administrative level '{admin_lvl_name}' not found.") + if selected_admin_lvl_shapes.empty: + raise KeyError - return selected_admin_lvl + return selected_admin_lvl_shapes ################## @@ -284,7 +329,7 @@ def construct_admin_lvl_boundaries( ################## # Load all regions from the GeoJSON file -regions = gpd.read_file(REGION_GEOJSON_PATH) +# regions = gpd.read_file(REGION_GEOJSON_PATH) # Unused, keep as archive # nb dechets : Unused for now # df_nb_dechets = pd.read_csv(NB_DECHETS_PATH) @@ -692,7 +737,6 @@ def line_chart_milieu(data_zds: pd.DataFrame, multi_filter_dict: dict): def plot_adopted_waste_spots( data_zds: pd.DataFrame, single_filter_dict: dict, - region_geojson_path: str, ) -> folium.Map: """Show a folium innteractive map of adopted spots within a selected region, filtered by environments of deposit. 
@@ -722,16 +766,9 @@ def plot_adopted_waste_spots( gdf_filtered = gdf.query(query_string) # 2/ Create the regions geodataframe # - # Unpack the region name - region = single_filter_dict["REGION"] - - # Load France regions from a GeoJSON file - regions = gpd.read_file(region_geojson_path) - - # Filter the region geodataframe for the specified region - selected_region = regions[regions["nom"].str.lower() == region.lower()] - if selected_region.empty: - raise KeyError(f"Region '{region}' not found.") + selected_admin_lvl = construct_admin_lvl_boundaries( + data_zds, NIVEAU_ADMIN, NIVEAUX_ADMIN_GEOJSON_PATH_DICT + ) # 3/ Initialize folium map # # Initialize a folium map, centered around the mean location of the waste points @@ -770,7 +807,7 @@ def plot_adopted_waste_spots( # 5/ Add the region boundary # # Add the region boundary to the map for context folium.GeoJson( - selected_region, + selected_admin_lvl, name="Region Boundary", style_function=lambda feature: { "weight": 2, @@ -851,12 +888,14 @@ def plot_adopted_waste_spots( ) st.markdown("### Spots Adoptés") - m = plot_adopted_waste_spots(data_zds, single_filter_dict_3, REGION_GEOJSON_PATH) + m = plot_adopted_waste_spots(data_zds, single_filter_dict_3) # Show the adopted spots map on the streamlit tab if m: folium_static(m) with tab4: st.markdown("### Densité des déchets en France") - choropleth = make_density_choropleth(data_zds, REGION_GEOJSON_PATH) + choropleth = make_density_choropleth( + data_zds, NIVEAUX_ADMIN_GEOJSON_PATH_DICT["Région"] + ) st.plotly_chart(choropleth, use_container_width=True) From c01dc2fab65b799af61f188c959250e3f6129beb Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 18:02:45 +0200 Subject: [PATCH 079/147] modifications mineures (titres et ajout texte explicatif sous onglet top dechets) --- dashboards/app/pages/data.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py 
index 05e20f6..d56c5f2 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -847,12 +847,12 @@ def load_df_dict_corr_dechet_materiau(): df_vide_indetermine = top_secteur_df[top_secteur_df["Secteur"] == "VIDE"] nb_vide_indetermine = df_vide_indetermine["Nombre de déchets"].sum() top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "VIDE"] - elif "INDERTERMINE" in top_secteur_df["Secteur"].unique(): + elif "INDERTERMINÉ" in top_secteur_df["Secteur"].unique(): df_vide_indetermine = top_secteur_df[ - top_secteur_df["Secteur"] == "INDERTERMINE" + top_secteur_df["Secteur"] == "INDERTERMINÉ" ] nb_vide_indetermine += df_vide_indetermine["Nombre de déchets"].sum() - top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "INDERTERMINE"] + top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "INDERTERMINÉ"] else: pass @@ -935,7 +935,7 @@ def load_df_dict_corr_dechet_materiau(): x="Nombre de déchets", y="Secteur", color="Secteur", - title="Top 10 des secteurs les plus ramassés", + title="Top 10 des secteurs économiques qui ont pu être déterminés parmis les dechets comptés", orientation="h", color_discrete_map=colors_map_secteur, text_auto=True, @@ -1007,6 +1007,16 @@ def load_df_dict_corr_dechet_materiau(): with st.container(border=True): st.plotly_chart(fig_marque, use_container_width=True) + with st.container(border=True): + st.write( + "La Responsabilité Élargie du Producteur (REP) est une obligation qui impose aux entreprises de payer une contribution financière" + + " pour la prise en charge de la gestion des déchets issus des produits qu’ils mettent sur le marché selon le principe pollueur-payeur." + + " Pour ce faire, elles doivent contribuer financièrement à la collecte, du tri et au recyclage de ces produits, " + + "généralement à travers les éco-organismes privés, agréés par l’Etat, comme CITEO pour les emballages. 
" + + "L’État a depuis 1993 progressivement mis en place 25 filières REP, qui regroupent de grandes familles de produits " + + "(emballages ménagers, tabac, textile, ameublement, …)." + ) + # Metriques et graphes Responsabilité elargie producteurs l3_col1, l3_col2 = st.columns(2) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) @@ -1015,7 +1025,7 @@ def load_df_dict_corr_dechet_materiau(): # Trick pour séparer les milliers nb_dechet_rep = f"{nb_dechet_rep:,.0f}".replace(",", " ") cell6.metric( - "Nombre de déchets catégorisés par responsabilités", + "Nombre de déchets catégorisés par filière REP", f"{nb_dechet_rep} dechets", ) @@ -1023,8 +1033,8 @@ def load_df_dict_corr_dechet_materiau(): cell7 = l3_col2.container(border=True) nb_rep = f"{nb_rep:,.0f}".replace(",", " ") cell7.metric( - "Nombre de responsabilités identifiés lors des collectes", - f"{nb_rep} Responsabilités", + "Nombre de filières REP identifiées lors des collectes", + f"{nb_rep} REP", ) fig_rep = px.bar( From 27def21b040d045756340b7e81f32151e3dcfdb4 Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Tue, 23 Apr 2024 18:29:49 +0200 Subject: [PATCH 080/147] =?UTF-8?q?modifications=20mineures=20(ajout=20lab?= =?UTF-8?q?els=20ordonn=C3=A9e=20top10=20marques=20et=20changement=20metho?= =?UTF-8?q?de=20de=20retrait=20secteurs=20vides=20et=20indetermin=C3=A9s)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index d56c5f2..75dc3e8 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -846,16 +846,17 @@ def load_df_dict_corr_dechet_materiau(): if "VIDE" in top_secteur_df["Secteur"].unique(): df_vide_indetermine = top_secteur_df[top_secteur_df["Secteur"] == "VIDE"] nb_vide_indetermine = 
df_vide_indetermine["Nombre de déchets"].sum() - top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "VIDE"] - elif "INDERTERMINÉ" in top_secteur_df["Secteur"].unique(): + elif "INDÉTERMINÉ" in secteur_df["Secteur"].unique(): df_vide_indetermine = top_secteur_df[ - top_secteur_df["Secteur"] == "INDERTERMINÉ" + top_secteur_df["Secteur"] == "INDÉTERMINÉ" ] nb_vide_indetermine += df_vide_indetermine["Nombre de déchets"].sum() - top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "INDERTERMINÉ"] else: pass + top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "INDÉTERMINÉ"] + top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "VIDE"] + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) @@ -957,7 +958,7 @@ def load_df_dict_corr_dechet_materiau(): st.warning( "⚠️ Il y a " + str(nb_vide_indetermine) - + " dechets dont le secteur n'a pas été determiné dans la totalité des dechets du top 10 des secteurs" + + " dechets dont le secteur n'a pas été determiné dans la totalité des dechets" ) # Metriques et graphes marques @@ -993,7 +994,6 @@ def load_df_dict_corr_dechet_materiau(): ) # add log scale to x axis fig_marque.update_layout(xaxis_type="log") - fig_marque.update_yaxes(showticklabels=False) # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_marque.update_layout( From f685c0c273b14bc4d717c85e9458414e4bdd5dc4 Mon Sep 17 00:00:00 2001 From: Mendi33 Date: Wed, 24 Apr 2024 08:40:51 +0000 Subject: [PATCH 081/147] =?UTF-8?q?Push=20pour=20MAJ=20avec=20Val=C3=A9rie?= =?UTF-8?q?.=20Nouvelles=20dispositions=20des=20evenements=20=C3=A0=20veni?= =?UTF-8?q?r.=20Nouveaux=20graphiques=20sur=20les=20ramassages=20r=C3=A9al?= =?UTF-8?q?is=C3=A9s=20Une=20partie=20des=20corrections=20demand=C3=A9es?= 
=?UTF-8?q?=20par=20TM=20faites=20(vocabulaire,=20emoji,=20...)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/actions.py | 308 ++++++++++++++++---------------- 1 file changed, 154 insertions(+), 154 deletions(-) diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index 3940c61..fea8ae1 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -16,138 +16,73 @@ filtre_niveau = st.session_state.get("niveau_admin", "") filtre_collectivite = st.session_state.get("collectivite", "") +# Titre de l'onglet +st.markdown( + """# 👊 Actions +Visualisez les actions réalisées et celles à venir +""" +) + if st.session_state["authentication_status"]: - # Définition d'une fonction pour charger les données du nombre de déchets - @st.cache_data - def load_df_dict_corr_dechet_materiau(): - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" - "exploration-des-donn%C3%A9es/Exploration_visualisation/data/dict_de" - "chet_groupe_materiau.csv" - ) + if filtre_niveau == "" and filtre_collectivite == "": + st.write("Aucune sélection de territoire n'a été effectuée") + else: + st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") - @st.cache_data - def load_df_nb_dechet(): + # Définition d'une fonction pour charger les evenements à venir + def load_df_events_clean() -> pd.DataFrame: return pd.read_csv( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv" + "sation/data/export_events_cleaned.csv" ) - # Définition d'une fonction pour charger les autres données - @st.cache_data - def load_df_other(): - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv" - ) - - # Ajout 
des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - - return df - # Appel des fonctions pour charger les données + df_events = load_df_events_clean() # Appeler les dataframes volumes et nb_dechets filtré depuis le session state if "df_other_filtre" not in st.session_state: st.write( """ - ### :warning: Merci de sélectionner une collectivité\ - dans l'onglet Home pour afficher les données. :warning: - """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Accueil pour afficher les données. :warning: + """ ) st.stop() else: df_other = st.session_state["df_other_filtre"].copy() - # Titre de l'onglet - st.markdown( - """# 🔎 Actions - Quels sont les actions mises en place par les acteurs ? - """ - ) - # 2 Onglets : Evènements, Evènements à venir tab1, tab2 = st.tabs( [ - "Evènements", - "Evènements à venir", + "Ramassages réalisés ✔️", + "Evènements à venir 🗓️", ] ) # Onglet 1 : Evènements with tab1: - if filtre_niveau == "" and filtre_collectivite == "": - st.write( - "Aucune sélection de territoire n'ayant été effectuée les données sont globales" - ) - else: - st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") - - #################### - # @Valerie : J'ai comment pour éviter les errreur - # Les DF sont chargés au dessus comme dans l'onglet DATA - # Je n'ai pas trouvé de référence à 'df_nb_dechets_filtre' dans l'onglet DATA - #################### - - # Appeler les dataframes volumes et nb_dechets filtré depuis le session state - # if ("df_other_filtre" not in st.session_state) or ( - # "df_nb_dechets_filtre" not in st.session_state - # ): - # st.write( - # """ - # ### :warning: Merci de sélectionner une collectivité\ - # dans l'onglet Home pour afficher les données. 
:warning: - # """ - # ) - - # df_nb_dechet = pd.read_csv( - # ( - # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - # "sation/data/data_releve_nb_dechet.csv" - # ) - # ) - - # df_other = pd.read_csv( - # ( - # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - # "sation/data/data_zds_enriched.csv" - # ) - # ) - - # else: - # df_other = st.session_state["df_other_filtre"].copy() - # df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() - - # Copier le df pour la partie filtrée par milieu/lieu/année - df_other_metrics_raw = df_other.copy() - annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) # Filtre par année: - options = ["Aucune sélection"] + list(df_other["ANNEE"].unique()) + options = ["Toute la période"] + annee_liste annee_choisie = st.selectbox("Choisissez l'année:", options, index=0) - if annee_choisie == "Aucune sélection": + if annee_choisie == "Toute la période": df_other_filtre = df_other.copy() - if annee_choisie != "Aucune sélection": + if annee_choisie != "Toute la période": df_other_filtre = df_other[df_other["ANNEE"] == annee_choisie].copy() # Copie des données pour transfo - df_events = df_other_filtre.copy() + df_ramassages = df_other_filtre.copy() # Calcul des indicateurs clés de haut de tableau avant transformation - volume_total = df_events["VOLUME_TOTAL"].sum() - poids_total = df_events["POIDS_TOTAL"].sum() - nombre_participants = df_events["NB_PARTICIPANTS"].sum() - nb_collectes = len(df_events) - nombre_structures = df_events["ID_STRUCTURE"].nunique() + volume_total = df_ramassages["VOLUME_TOTAL"].sum() + poids_total = df_ramassages["POIDS_TOTAL"].sum() + nombre_participants = df_ramassages["NB_PARTICIPANTS"].sum() + nb_collectes = len(df_ramassages) + nombre_structures = df_ramassages["ID_STRUCTURE"].nunique() # Ligne 1 : 3 cellules avec les 
indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -165,7 +100,7 @@ def load_df_other(): # 3ème métrique : Nombre de Structures cell3 = l1_col3.container(border=True) nombre_structures = f"{nombre_structures:,.0f}".replace(",", " ") - cell3.metric("Nombre de structures", f"{nombre_structures}") + cell3.metric("Nombre de structures participantes", f"{nombre_structures}") # Ligne 2 : Carte # Initialisation du zoom sur la carte @@ -195,7 +130,7 @@ def load_df_other(): tiles="OpenStreetMap", ) # Facteur de normalisation pour ajuster la taille des bulles - normalisation_facteur = 100 + normalisation_facteur = 200 for index, row in df_map_evnenements.iterrows(): # Application de la normalisation radius = row["NB_PARTICIPANTS"] / normalisation_facteur @@ -243,6 +178,12 @@ def load_df_other(): df_milieux_counts_sorted = df_milieux_counts.sort_values( by="counts", ascending=True ) + # Retirer le texte entre parenthèses et les parenthèses elles-mêmes + df_milieux_counts_sorted.TYPE_MILIEU = ( + df_milieux_counts_sorted.TYPE_MILIEU.str.replace( + r"\([^()]*\)", "", regex=True + ).str.strip() + ) fig2_actions = px.bar( df_milieux_counts_sorted, @@ -266,42 +207,59 @@ def load_df_other(): with cell5: st.plotly_chart(fig2_actions, use_container_width=True) - # Ligne 3 : 2 graphiques en ligne : carte relevés et bar chart matériaux - l3_col1, l3_col2 = st.columns(2) - cell6 = l3_col1.container(border=True) - cell7 = l3_col2.container(border=True) - - # Ligne 4 : 2 graphiques en ligne : bar chart milieux et bar chart types déchets + # Ligne 4 : 2 graphiques en ligne : bar chart types déchets et line chart volume + nb collectes par mois + # préparation du dataframe et figure releves types de déchets + df_type_dechet = df_other_filtre.copy() + df_type_dechet_counts = ( + df_type_dechet["TYPE_DECHET"].value_counts().reset_index() + ) + df_type_dechet_counts.columns = ["TYPE_DECHET", "counts"] + df_type_dechet_counts_sorted = df_type_dechet_counts.sort_values( 
+ by="counts", ascending=False + ) + fig3_actions = px.bar( + df_type_dechet_counts_sorted, + y="counts", + x="TYPE_DECHET", + title="Nombre de relevés par types de déchets", + text="counts", + ) + fig3_actions.update_layout(xaxis_title="", yaxis_title="") + # préparation du dataframe et figure volume + nb collectes volume + nb collectes par mois + df_mois = df_other_filtre.copy() + df_mois["DATE"] = pd.to_datetime(df_mois["DATE"]) + df_mois["MOIS"] = df_mois["DATE"].dt.month + df_mois_counts = df_mois["MOIS"].value_counts().reset_index() + df_mois_counts.columns = ["MOIS", "counts"] + fig4_actions = px.bar( + df_mois_counts, + y="counts", + x="MOIS", + title="Nombre de relevés par mois", + text="counts", + ) + fig4_actions.update_layout(xaxis_title="", yaxis_title="") l4_col1, l4_col2 = st.columns(2) - cell8 = l4_col1.container(border=True) - cell9 = l4_col2.container(border=True) - - # Ligne 5 : 2 graphiques en ligne : line chart volume + nb collectes et Pie niveau de caractérisation - l5_col1, l5_col2 = st.columns(2) - cell10 = l5_col1.container(border=True) - cell11 = l5_col2.container(border=True) + cell6 = l4_col1.container(border=True) + cell7 = l4_col2.container(border=True) + # Affichage barplot + with cell6: + st.plotly_chart(fig3_actions, use_container_width=True) + # Affichage barplot + with cell7: + st.plotly_chart(fig4_actions, use_container_width=True) # onglet Evenements a venir with tab2: - st.write(f"Votre territoire : Pays - France") - - # Définition d'une fonction pour charger les evenements à venir - @st.cache_data - def load_df_events_clean() -> pd.DataFrame: - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/export_events_cleaned.csv" - ) - - # Appel des fonctions pour charger les données - df_events = load_df_events_clean() + # Copie des données pour transfo + df_events_a_venir = df_events.copy() - df_events.DATE = 
pd.to_datetime(df_events.DATE) + # Convertit la col DATE en datetime pour formatage + df_events_a_venir.DATE = pd.to_datetime(df_events.DATE) - # Filtrer les événements à venir - df_events_a_venir = df_events[ - df_events.DATE > (datetime.now() - timedelta(days=5)) + # Filtrer les événements à venir (jour de consultation -5 jours: pour afficher les possibles evenements en cours) + df_events_a_venir = df_events_a_venir[ + df_events_a_venir.DATE > (datetime.now() - timedelta(days=5)) ] # Trie les events par date @@ -320,42 +278,84 @@ def load_df_events_clean() -> pd.DataFrame: zoom_start=6, ) - # Ajouter des marqueurs pour chaque événement à venir sur la carte + # Ajout des marqueurs pour chaque événement à venir sur la carte for idx, row in df_events_a_venir.iterrows(): + # Personnalisation des Popup des markers + + # Vide si pas d'evenement d'envergure + event_envg = ( + "" + if pd.isna(row.EVENEMENT_ENVERGURE) + else f'
Opération : {row.EVENEMENT_ENVERGURE}
' + ) + + html = f""" +
+

+

+ {row.TYPE_EVENEMENT} +
+
+ {row.NOM_EVENEMENT} +
+
+
+ {row.DATE.strftime("%A %d %B %Y")} +
+

+

+ {event_envg} +

Organisé par : {row.NOM_STRUCTURE}
+

+
+ """ + + iframe = folium.IFrame(html=html, width=300, height=120) + popup = folium.Popup(iframe, parse_html=True, max_width=300) + folium.Marker( location=[row.COORD_GPS_Y, row.COORD_GPS_X], - popup=folium.Popup(row.NOM_EVENEMENT, lazy=False), - # tooltip=row.NOM_EVENEMENT, + popup=popup, + tooltip=row.NOM_VILLE, # icon=folium.Icon(icon_color=color_ZDS_bleu) ).add_to(map_events) # Afficher la liste des événements à venir avec la date affichée avant le nom st.subheader("Actions à venir :") - with st.container(height=500, border=False): - for idx, row in df_events_a_venir.iterrows(): - with st.container(border=True): - # Bloc contenant la date - date_block = f"
{row.DATE.day}
{row.DATE.strftime('%b')}
" - # Bloc contenant le nom de l'événement - event_block = ( - f"
{row.NOM_EVENEMENT}
" - ) - # Bloc contenant le type d'événement et le nom de la structure - type_structure_block = f"{row.TYPE_EVENEMENT} | {row.NOM_STRUCTURE}" - - # Ajout de chaque événement dans la liste - st.write( - f"
{date_block}
{event_block}{type_structure_block}
", - unsafe_allow_html=True, - ) - - # Afficher la carte avec Streamlit - st_folium = st.components.v1.html - st_folium( - folium.Figure().add_child(map_events).render(), - width=800, - height=800, - ) + carte, liste = st.columns(2) + + # Afficher la carte + with carte: + with st.container(border=True): + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_events).render(), + # width=650, ne pas spécifié de largeur pour garder le coté responsive de la carte avec la liste à coté + height=600, + ) + + with liste: + with st.container( + height=600, border=False + ): # Container avec hauteur fixe => Scrollbar si beaucoup d'events + for idx, row in df_events_a_venir.iterrows(): + with st.container(border=True): + # Bloc contenant la date + date_block = f"
{row.DATE.day}
{row.DATE.strftime('%b')}
" + # Bloc contenant le nom de l'événement + event_block = ( + f"
{row.NOM_EVENEMENT}
" + ) + # Bloc contenant le type d'événement et le nom de la structure + type_structure_block = ( + f"{row.TYPE_EVENEMENT} | {row.NOM_STRUCTURE}" + ) + + # Ajout de chaque événement dans la liste + st.write( + f"
{date_block}
{event_block}{type_structure_block}
", + unsafe_allow_html=True, + ) else: st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") From 5f53567213c62d05856c4552edc964810995b13f Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Wed, 24 Apr 2024 10:42:45 +0200 Subject: [PATCH 082/147] treemap rep et supression categorie vide avec message d'information --- dashboards/app/pages/data.py | 61 +++++++++++++++++++++++------------- 1 file changed, 39 insertions(+), 22 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 75dc3e8..2f1fa11 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -954,6 +954,7 @@ def load_df_dict_corr_dechet_materiau(): with st.container(border=True): st.plotly_chart(fig_secteur, use_container_width=True) + # Message d'avertissement Nombre de dechets dont le secteur n'a pas été determine if nb_vide_indetermine != 0: st.warning( "⚠️ Il y a " @@ -1020,6 +1021,19 @@ def load_df_dict_corr_dechet_materiau(): # Metriques et graphes Responsabilité elargie producteurs l3_col1, l3_col2 = st.columns(2) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # Suppression de la catégorie "VIDE" + nb_vide_rep = 0 + if "VIDE" in top_rep_df["Responsabilité élargie producteur"].unique(): + df_vide_rep = top_rep_df[ + top_rep_df["Responsabilité élargie producteur"] == "VIDE" + ] + nb_vide_rep = df_vide_rep["Nombre de déchets"].sum() + else: + pass + top_rep_df = top_rep_df[ + top_rep_df["Responsabilité élargie producteur"] != "VIDE" + ] + # 1ère métrique : nombre de dechets catégorisés repartis par responsabilités cell6 = l3_col1.container(border=True) # Trick pour séparer les milliers @@ -1037,33 +1051,36 @@ def load_df_dict_corr_dechet_materiau(): f"{nb_rep} REP", ) - fig_rep = px.bar( + # Treemap REP + figreptree = px.treemap( top_rep_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), - x="Nombre de déchets", - y="Responsabilité 
élargie producteur", + path=["Responsabilité élargie producteur"], + values="Nombre de déchets", title="Top 10 des Responsabilités élargies producteurs relatives aux dechets les plus ramassés", - color_discrete_sequence=["#1951A0"], - orientation="h", - text_auto=False, - text=top_rep_df.tail(10)["Responsabilité élargie producteur"] - + ": " - + top_rep_df.tail(10)["Nombre de déchets"].astype(str), + color="Responsabilité élargie producteur", + color_discrete_sequence=px.colors.qualitative.Set2, ) - # add log scale to x axis - fig_rep.update_layout(xaxis_type="log") - # Masquer les labels de l'axe des ordonnées - fig_rep.update_yaxes(showticklabels=False) - # fig_rep.update_traces(texttemplate="%{value:.0f}", textposition="inside") - - fig_rep.update_layout( - width=800, - height=500, - uniformtext_minsize=8, - uniformtext_mode="hide", - yaxis_title=None, + figreptree.update_layout( + margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 + ) + figreptree.update_traces( + textinfo="label+value", + texttemplate="%{label}
%{value:.0f} litres", + textfont=dict(size=16), + hovertemplate="%{label}
Volume: %{value:.0f}", ) with st.container(border=True): - st.plotly_chart(fig_rep, use_container_width=True) + st.plotly_chart(figreptree, use_container_width=True) + + # Message d'avertissement Nombre de dechets dont la REP n'a pas été determine + if nb_vide_rep != 0: + st.warning( + "⚠️ Il y a " + + str(nb_vide_rep) + + " dechets dont la responsabilité producteur n'a pas été determiné dans la totalité des dechets comptabilisés" + ) + + else: st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") From 34edef2f32e790905038daef108d01b22782c7c0 Mon Sep 17 00:00:00 2001 From: "F.Hakimi" Date: Wed, 24 Apr 2024 11:54:43 +0200 Subject: [PATCH 083/147] =?UTF-8?q?titres=20fig=20echelles=20log=20+=20nom?= =?UTF-8?q?=20Verre=20et=20Papier=20+=20Part=20->=20Proportion=20+=20suppr?= =?UTF-8?q?ession=20l=C3=A9gende=20Top=2010=20d=C3=A9chets?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/.credentials-dev.yml | 14 ------------ dashboards/app/pages/data.py | 34 +++++++++++++++++++---------- 2 files changed, 22 insertions(+), 26 deletions(-) delete mode 100644 dashboards/app/.credentials-dev.yml diff --git a/dashboards/app/.credentials-dev.yml b/dashboards/app/.credentials-dev.yml deleted file mode 100644 index 716cedd..0000000 --- a/dashboards/app/.credentials-dev.yml +++ /dev/null @@ -1,14 +0,0 @@ -cookie: - expiry_days: 30 - key: some_signature_key - name: some_cookie_name -credentials: - usernames: - test: - email: test@test.com - logged_in: false - name: test - password: $2b$12$fR4sp7tIG.dbeusbr695MOw/xvN1sf.21rML7t7j9pCdIVREIocUO -pre-authorized: - emails: - - test@test.com diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 2f1fa11..a4b9777 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -125,6 +125,13 @@ def load_df_dict_corr_dechet_materiau(): "Volume" ].sum() df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) 
+ # replace "Verre" with "Verre/Céramique" in df_totals_sorted + df_totals_sorted["Matériau"] = df_totals_sorted["Matériau"].replace( + "Verre", "Verre/Céramique" + ) + df_totals_sorted["Matériau"] = df_totals_sorted["Matériau"].replace( + "Papier", "Papier/Carton" + ) # Charte graphique MERTERRE : colors_map = { @@ -261,7 +268,7 @@ def load_df_dict_corr_dechet_materiau(): y="Volume", color="Matériau", barnorm="percent", - title="Part de chaque matériau en volume selon le milieu de collecte", + title="Proportion de chaque matériau en volume selon le milieu de collecte", color_discrete_map=colors_map, text_auto=True, ) @@ -269,7 +276,7 @@ def load_df_dict_corr_dechet_materiau(): fig3.update_layout( bargap=0.2, height=600, - yaxis_title="Part du volume collecté (en %)", + yaxis_title="Proportion du volume collecté (en %)", xaxis_title=None, ) fig3.update_xaxes(tickangle=-30) @@ -278,7 +285,7 @@ def load_df_dict_corr_dechet_materiau(): texttemplate="%{y:.0f}%", textposition="inside", hovertemplate="%{x}
Part du volume collecté dans ce milieu: %{y:.0f} %", - textfont_size=14, + textfont_size=10, ) # Afficher le graphique @@ -592,13 +599,15 @@ def load_df_dict_corr_dechet_materiau(): y="categorie", x="nb_dechet", labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, - title="Top 10 dechets ramassés ", + title="Top 10 dechets ramassés (échelle logarithmique) ", text="nb_dechet", color="Materiau", color_discrete_map=colors_map, category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, ) fig5.update_layout(xaxis_type="log") + # suppression de la légende des couleurs + fig5.update_layout(showlegend=False) # Amélioration du visuel du graphique fig5.update_traces( # texttemplate="%{text:.2f}", @@ -607,12 +616,7 @@ def load_df_dict_corr_dechet_materiau(): textfont_size=20, ) fig5.update_layout( - width=1400, - height=900, - uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=90, - legend=dict(x=1, y=0, xanchor="right", yanchor="bottom"), + width=1400, height=900, uniformtext_minsize=8, uniformtext_mode="hide" ) # Suppression de la colonne categorie @@ -812,6 +816,9 @@ def load_df_dict_corr_dechet_materiau(): ) top_secteur_df = top_secteur_df.reset_index() top_secteur_df.columns = ["Secteur", "Nombre de déchets"] + top_secteur_df["Nombre de déchets"] = top_secteur_df[ + "Nombre de déchets" + ].astype(int) # Data pour le plot marque marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] @@ -822,6 +829,9 @@ def load_df_dict_corr_dechet_materiau(): ) top_marque_df = top_marque_df.reset_index() top_marque_df.columns = ["Marque", "Nombre de déchets"] + top_marque_df["Nombre de déchets"] = top_marque_df["Nombre de déchets"].astype( + int + ) # Data pour le plot responsabilités rep_df = df_init[df_init["type_regroupement"].isin(["REP"])] @@ -936,7 +946,7 @@ def load_df_dict_corr_dechet_materiau(): x="Nombre de déchets", y="Secteur", color="Secteur", - title="Top 10 des secteurs économiques qui ont pu être déterminés parmis les 
dechets comptés", + title="Top 10 secteurs économiques identifiés dans les déchets comptés (échelle logarithmique)", orientation="h", color_discrete_map=colors_map_secteur, text_auto=True, @@ -985,7 +995,7 @@ def load_df_dict_corr_dechet_materiau(): top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), x="Nombre de déchets", y="Marque", - title="Top 10 des marques les plus ramassées", + title="Top 10 des marques les plus ramassées (échelle logarithmique)", color_discrete_sequence=["#1951A0"], orientation="h", text_auto=False, From 56dfb8dece0f46c85b92dc03a690f93d9a722ba9 Mon Sep 17 00:00:00 2001 From: DridrM Date: Wed, 24 Apr 2024 13:29:55 +0200 Subject: [PATCH 084/147] Correction of the construct_query_string and the scalable_filters_multi_select functions --- dashboards/app/pages/hotspots.py | 254 +++++++++++++++++++------------ dashboards/app/requirements.txt | 1 + 2 files changed, 158 insertions(+), 97 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 95f0d48..0e8c18d 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -33,6 +33,17 @@ "Commune": "COMMUNE_CODE_NOM", } +# This is a copy of the previous dict, with just the "EPCI" value modified with the +# name of the "COMMUNE_CODE_NOM" column in the data_zds df, in order to trigger the display +# of the "EPCI" level boundaries without an "EPCI" geojson map, knowing that one EPCI is +# made of one or multiple "communes" +NIVEAUX_ADMIN_DICT_ALTERED = { + "Région": "REGION", + "Département": "DEPARTEMENT", + "EPCI": "commune", + "Commune": "commune", +} + # The name of the "niveau_admin" fetch from the session state NIVEAU_ADMIN = st.session_state["niveau_admin"] @@ -56,13 +67,20 @@ "sation/data/data_zds_enriched.csv" ) -# Data path for the France regions geojson -REGION_GEOJSON_PATH = ( - "https://raw.githubusercontent.com/dataforgoodfr/" - "12_zero_dechet_sauvage/1-exploration-des-donn%C3%A9es/" - 
"Exploration_visualisation/data/regions-avec-outre-mer.geojson" +# Root data path for the France administrative levels geojson +NIVEAUX_ADMIN_GEOJSON_ROOT_PATH = ( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/1-" + "exploration-des-donn%C3%A9es/Exploration_visualisation/data/" ) +# Dict containing the path of the administrative levels geojson referenced by the names of these adminitrative levels +NIVEAUX_ADMIN_GEOJSON_PATH_DICT = { + "Région": f"{NIVEAUX_ADMIN_GEOJSON_ROOT_PATH}regions-avec-outre-mer.geojson", + "Département": f"{NIVEAUX_ADMIN_GEOJSON_ROOT_PATH}departements-avec-outre-mer.geojson", + "EPCI": f"{NIVEAUX_ADMIN_GEOJSON_ROOT_PATH}communes-avec-outre-mer.geojson", + "Commune": f"{NIVEAUX_ADMIN_GEOJSON_ROOT_PATH}communes-avec-outre-mer.geojson", +} + # Data path for Correction CORRECTION = ( "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/" @@ -79,10 +97,6 @@ # Params for the adopted spots map filters ADOPTED_SPOTS_FILTERS_PARAMS = [ - { - "filter_col": "REGION", - "filter_message": "Sélectionnez une région (par défaut votre région) :", - }, {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, {"filter_col": "ANNEE", "filter_message": "Sélectionnez une année :"}, ] @@ -90,7 +104,8 @@ # Params for the density graph filters DENSITY_FILTERS_PARAMS = [ {"filter_col": "TYPE_MILIEU", "filter_message": "Sélectionnez un milieu :"}, - {"filter_col": "TYPE_LIEU2", "filter_message": "Sélectionnez un lieu :"} + {"filter_col": "TYPE_LIEU2", "filter_message": "Sélectionnez un lieu :"}, + {"filter_col": "ANNEE", "filter_message": "Sélectionnez une année :"}, ] @@ -155,7 +170,7 @@ def construct_query_string(bound_word=" and ", **params) -> str: if param: # Check if the parameter value is of type int - if isinstance(param, int): + if isinstance(param, (int, np.int64)): # If it's an integer, use integer comparison query_sub_string = f"{param_key} == {param}" @@ -163,7 +178,12 @@ def construct_query_string(bound_word=" 
and ", **params) -> str: elif isinstance(param, list): # Handle list of values for multiselect queries. param_values = ", ".join( - [f'"{value}"' for value in param] + [ + f'"{value}"' + if not isinstance(value, (int, np.int64)) + else f"{value}" + for value in param + ] ) # Prepare string of values enclosed in quotes. query_sub_string = ( f"{param_key} in [{param_values}]" # Use 'in' operator for lists. @@ -237,9 +257,7 @@ def scalable_filters_multi_select( column, message = filter_params["filter_col"], filter_params["filter_message"] # Get unique values, convert to string and sort them - sorted_values = sorted( - data_zds[column].dropna().astype(str).unique(), reverse=True - ) + sorted_values = sorted(data_zds[column].dropna().unique(), reverse=True) # Generate a unique key for each multiselect widget unique_key = f"{base_key}_{column}_{i}" @@ -258,28 +276,55 @@ def scalable_filters_multi_select( return filter_dict +def construct_admin_lvl_filter_list( + data_zds: pd.DataFrame, + admin_lvl: str, + admin_lvl_dict_altered=NIVEAUX_ADMIN_DICT_ALTERED, +) -> list: + """Create a list of names for a given admin level. 
This function was created
+    in order to trigger the display of the 'EPCI' level boundaries without an
+    'EPCI' geojson map, knowing that one EPCI is made of one or multiple 'communes'
+    Arguments:
+    - data_zds: the dataframe containing waste data and administrative levels columns
+    - admin_lvl: the common name of the target administrative level
+    Params:
+    - admin_lvl_dict_altered: a dict mapping admin levels' common names to the names
+    of the corresponding columns in the data_zds df"""
+
+    # Unpack the column name of the admin level
+    admin_lvl_col_name = admin_lvl_dict_altered[f"{admin_lvl}"]
+
+    # Return the list of unique administrative names corresponding to the selection made in the home tab
+    return list(data_zds[f"{admin_lvl_col_name}"].str.lower().unique())
+
+
 def construct_admin_lvl_boundaries(
-    admin_lvl: str, single_filter_dict: dict, admin_lvl_geojson_path_dict: dict
+    data_zds: pd.DataFrame, admin_lvl: str, admin_lvl_geojson_path_dict: dict
 ) -> any:
-    """"""
+    """Return a filtered geodataframe with shapes of a target administrative level. 
+ Arguments: + - data_zds: the dataframe containing waste data and administrative levels columns + - admin_lvl: the common name of the target administrative level + - admin_lvl_geojson_path_dict: a dict mapping administrative levels common + names and the paths of the geojson administrative levels shapes""" # Unpack the admin level geojson path admin_lvl_geojson_path = admin_lvl_geojson_path_dict[f"{admin_lvl}"] # Unpack the region name - admin_lvl_name = single_filter_dict[f"{admin_lvl}"] + admin_lvl_names = construct_admin_lvl_filter_list(data_zds, admin_lvl) # Load France regions from a GeoJSON file admin_lvl_shapes = gpd.read_file(admin_lvl_geojson_path) # Filter the region geodataframe for the specified region - selected_admin_lvl = admin_lvl_shapes[ - admin_lvl_shapes["nom"].str.lower() == admin_lvl_name.lower() + selected_admin_lvl_shapes = admin_lvl_shapes[ + admin_lvl_shapes["nom"].str.lower().isin(admin_lvl_names) ] - if selected_admin_lvl.empty: - raise KeyError(f"Administrative level '{admin_lvl_name}' not found.") + if selected_admin_lvl_shapes.empty: + raise KeyError - return selected_admin_lvl + return selected_admin_lvl_shapes ################## @@ -287,7 +332,7 @@ def construct_admin_lvl_boundaries( ################## # Load all regions from the GeoJSON file -regions = gpd.read_file(REGION_GEOJSON_PATH) +# regions = gpd.read_file(REGION_GEOJSON_PATH) # Unused, keep as archive # nb dechets : Unused for now # df_nb_dechets = pd.read_csv(NB_DECHETS_PATH) @@ -319,12 +364,13 @@ def construct_admin_lvl_boundaries( # 2.1/ Carte densité de déchets sur les zones étudiées # ######################################################## + def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicator_col3): # Calculate density - data['DENSITE'] = data['VOLUME_TOTAL'] / data['SURFACE'] - data = data[data['DENSITE'] < 20] # Remove rows with anomalously high density values - - + data["DENSITE"] = data["VOLUME_TOTAL"] / data["SURFACE"] + data = data[ 
+ data["DENSITE"] < 20 + ] # Remove rows with anomalously high density values # Display metrics in specified UI containers cell1 = indicator_col1.container(border=True) @@ -333,26 +379,29 @@ def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicato cell2 = indicator_col2.container(border=True) cell2.metric("Volume Moyen :", f"{data['VOLUME_TOTAL'].mean().round(2)} Litres") - cell3 = indicator_col3.container(border=True) + cell3 = indicator_col3.container(border=True) cell3.metric("Surface Moyenne :", f"{data['SURFACE'].mean().round(2):,} m²") return data + # Define the colors representing les différents 'Lieux' et 'Milieux' -couleur = { - 'Littoral (terrestre)': 'lightblue', - 'Mer - Océan': 'darkblue', - 'Cours d\'eau': 'cyan', - 'Zone naturelle ou rurale (hors littoral et montagne)': 'green', - 'Zone urbaine': 'orange', - 'Lagune et étang côtier': 'red', - 'Multi-lieux': 'pink', - 'Montagne': 'grey', - 'Présent au sol (abandonné)': 'black'} +couleur = { + "Littoral (terrestre)": "lightblue", + "Mer - Océan": "darkblue", + "Cours d'eau": "cyan", + "Zone naturelle ou rurale (hors littoral et montagne)": "green", + "Zone urbaine": "orange", + "Lagune et étang côtier": "red", + "Multi-lieux": "pink", + "Montagne": "grey", + "Présent au sol (abandonné)": "black", +} # Function to retrieve the color associated with a given environment type def couleur_milieu(type): - return couleur.get(type, 'white') # Returns 'white' if the type is not found + return couleur.get(type, "white") # Returns 'white' if the type is not found + # Function to plot a density map def plot_density_map( @@ -364,16 +413,23 @@ def plot_density_map( gdf = gpd.read_file(region_geojson_path) # Calculate density - data_zds['DENSITE'] = data_zds['VOLUME_TOTAL']/data_zds['SURFACE'] - data_zds = data_zds[data_zds['DENSITE'] < 20] # Remove rows with anomalously high density values + data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] + data_zds = data_zds[ + 
data_zds["DENSITE"] < 20 + ] # Remove rows with anomalously high density values # Round density values for display - data_zds['DENSITE'] = data_zds['DENSITE'].round(4) - # Round surface values for display - data_zds['SURFACE_ROND'] = data_zds['SURFACE'].round(2) + data_zds["DENSITE"] = data_zds["DENSITE"].round(4) + # Round surface values for display + data_zds["SURFACE_ROND"] = data_zds["SURFACE"].round(2) # Initialize a map centered at the mean coordinates of locations - m = folium.Map(location=[data_zds['LIEU_COORD_GPS_Y'].mean(), data_zds['LIEU_COORD_GPS_X'].mean()]) + m = folium.Map( + location=[ + data_zds["LIEU_COORD_GPS_Y"].mean(), + data_zds["LIEU_COORD_GPS_X"].mean(), + ] + ) # Loop over each row in the DataFrame to place markers for index, row in data_zds.iterrows(): @@ -387,52 +443,55 @@ def plot_density_map( """ lgd_txt = '{txt}' - color = couleur_milieu(row['TYPE_MILIEU']) + color = couleur_milieu(row["TYPE_MILIEU"]) folium.CircleMarker( - fg = folium.FeatureGroup(name= lgd_txt.format( txt= ['TYPE_MILIEU'], col= color)), - location=[row['LIEU_COORD_GPS_Y'], row['LIEU_COORD_GPS_X']], - radius=np.log(row['DENSITE'] + 1)*15, + fg=folium.FeatureGroup(name=lgd_txt.format(txt=["TYPE_MILIEU"], col=color)), + location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], + radius=np.log(row["DENSITE"] + 1) * 15, popup=folium.Popup(popup_html, max_width=300), color=color, fill=True, - ).add_to(m) folium_static(m) + # Function for 'milieu' density table + def density_table(data_zds: pd.DataFrame): # Calculate density - data_zds['DENSITE'] = data_zds['VOLUME_TOTAL'] / data_zds['SURFACE'] + data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] # Remove rows with anomalously high density values - data_zds = data_zds[data_zds['DENSITE'] < 20] + data_zds = data_zds[data_zds["DENSITE"] < 20] # Group by 'TYPE_MILIEU', calculate mean density, sort, and round the density table_milieu = ( - data_zds.groupby('TYPE_MILIEU')['DENSITE'] + 
data_zds.groupby("TYPE_MILIEU")["DENSITE"] .mean() .reset_index() - .sort_values(by='DENSITE', ascending=False) + .sort_values(by="DENSITE", ascending=False) + ) + table_milieu["DENSITE"] = table_milieu["DENSITE"].round(4) + + st.dataframe( + table_milieu, + column_order=("TYPE_MILIEU", "DENSITE"), + hide_index=True, + width=800, + column_config={ + "TYPE_MILIEU": st.column_config.TextColumn( + "Milieu", + ), + "DENSITE": st.column_config.NumberColumn( + "Densité (L/m²)", + format="%f", + min_value=0, + max_value=max(table_milieu["DENSITE"]), + ), + }, ) - table_milieu['DENSITE'] = table_milieu['DENSITE'].round(4) - - st.dataframe(table_milieu, - column_order=("TYPE_MILIEU", "DENSITE"), - hide_index=True, - width=800, - column_config={ - "TYPE_MILIEU": st.column_config.TextColumn( - "Milieu", - ), - "DENSITE": st.column_config.NumberColumn( - "Densité (L/m²)", - format="%f", - min_value=0, - max_value=max(table_milieu['DENSITE']), - )} - ) ################################ @@ -443,7 +502,6 @@ def density_table(data_zds: pd.DataFrame): def plot_adopted_waste_spots( data_zds: pd.DataFrame, single_filter_dict: dict, - region_geojson_path: str, ) -> folium.Map: """Show a folium innteractive map of adopted spots within a selected region, filtered by environments of deposit. 
@@ -469,16 +527,9 @@ def plot_adopted_waste_spots( gdf_filtered = gdf.query(query_string) # 2/ Create the regions geodataframe # - # Unpack the region name - region = single_filter_dict["REGION"] - - # Load France regions from a GeoJSON file - regions = gpd.read_file(region_geojson_path) - - # Filter the region geodataframe for the specified region - selected_region = regions[regions["nom"].str.lower() == region.lower()] - if selected_region.empty: - raise KeyError(f"Region '{region}' not found.") + selected_admin_lvl = construct_admin_lvl_boundaries( + data_zds, NIVEAU_ADMIN, NIVEAUX_ADMIN_GEOJSON_PATH_DICT + ) # 3/ Initialize folium map # # Initialize a folium map, centered around the mean location of the waste points @@ -495,6 +546,7 @@ def plot_adopted_waste_spots( st.markdown( "Il n'y a pas de hotspots pour les valeurs de filtres selectionnés !" ) + st.markdown(f"{e}") return # 4/ Add the markers # @@ -517,7 +569,7 @@ def plot_adopted_waste_spots( # 5/ Add the region boundary # # Add the region boundary to the map for context folium.GeoJson( - selected_region, + selected_admin_lvl, name="Region Boundary", style_function=lambda feature: { "weight": 2, @@ -532,12 +584,7 @@ def plot_adopted_waste_spots( # Dashboard Main Panel # ######################## -tab1, tab2 = st.tabs( - [ - "Densité des déchets dans zone étudié", - "Spots Adoptés" - ] -) +tab1, tab2 = st.tabs(["Densité des déchets dans zone étudié", "Spots Adoptés"]) with tab1: @@ -550,7 +597,9 @@ def plot_adopted_waste_spots( indicator_col1, indicator_col2, indicator_col3 = st.columns(3) # Call the function with the data and UI elements - calculate_and_display_metrics(data_zds, indicator_col1, indicator_col2, indicator_col3) + calculate_and_display_metrics( + data_zds, indicator_col1, indicator_col2, indicator_col3 + ) st.markdown("---") @@ -558,7 +607,7 @@ def plot_adopted_waste_spots( with left_column: st.markdown("### Carte des Densités") - plot_density_map(data_zds, REGION_GEOJSON_PATH) + 
plot_density_map(data_zds, NIVEAUX_ADMIN_GEOJSON_PATH_DICT["Région"]) with right_column: st.markdown("### Tableau des Densités par Milieu") @@ -567,12 +616,23 @@ def plot_adopted_waste_spots( with tab2: # Use the selected filters - single_filter_dict_3 = scalable_filters_single_select( - data_zds, ADOPTED_SPOTS_FILTERS_PARAMS, tab2 + single_filter_dict_3 = scalable_filters_multi_select( + data_zds, filters_params=ADOPTED_SPOTS_FILTERS_PARAMS, base_key=tab2 ) st.markdown("### Spots Adoptés") - m = plot_adopted_waste_spots(data_zds, single_filter_dict_3, REGION_GEOJSON_PATH) + + # Construct the adopted waste spots map + m = plot_adopted_waste_spots(data_zds, single_filter_dict_3) + + # Construct wo columns, one for the spots map the other for the tab of structures + left_column, right_column = st.columns([2, 1]) + # Show the adopted spots map on the streamlit tab - if m: - folium_static(m) + with left_column: + if m: + folium_static(m) + + +# if __name__ == "__main__": +# construct_admin_lvl_filter_list(data_zds, NIVEAU_ADMIN) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 2060a18..e4084a9 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -1,4 +1,5 @@ pandas==2.0.3 +numpy==1.26 geopandas==0.14.3 folium==0.16.0 duckdb==0.10.0 From 82100943ab6eadbad2334ca8590c88a883188374 Mon Sep 17 00:00:00 2001 From: linh dinh Date: Wed, 24 Apr 2024 14:52:16 +0200 Subject: [PATCH 085/147] =?UTF-8?q?Fix=20problem=20non=20value=20entr?= =?UTF-8?q?=C3=A9e?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/hotspots.py | 337 +++++++++++++++++-------------- 1 file changed, 188 insertions(+), 149 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 0e8c18d..10ec744 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -366,21 +366,50 @@ def construct_admin_lvl_boundaries( 
def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicator_col3): - # Calculate density - data["DENSITE"] = data["VOLUME_TOTAL"] / data["SURFACE"] - data = data[ - data["DENSITE"] < 20 - ] # Remove rows with anomalously high density values - # Display metrics in specified UI containers - cell1 = indicator_col1.container(border=True) - cell1.metric("Densité Moyenne :", f"{data['DENSITE'].mean().round(4)} L/m²") + if data.empty: + st.write("Aucune donnée disponible pour la région sélectionnée.") - cell2 = indicator_col2.container(border=True) - cell2.metric("Volume Moyen :", f"{data['VOLUME_TOTAL'].mean().round(2)} Litres") - - cell3 = indicator_col3.container(border=True) - cell3.metric("Surface Moyenne :", f"{data['SURFACE'].mean().round(2):,} m²") + else: + # Calculate density + data["DENSITE"] = data["VOLUME_TOTAL"] / data["SURFACE"] + data = data[ + data["DENSITE"] < 20 + ] # Remove rows with anomalously high density values + + # Calculate the mean of DENSITE + mean_density = data['DENSITE'].mean() + + # Check if the result is a float and then apply round + if isinstance(mean_density, float): + rounded_mean_density = round(mean_density, 4) + else: + # Handle the unexpected type here, maybe set to a default value or raise an error + rounded_mean_density = 0 # Example default value + + # Display metrics in specified UI containers + cell1 = indicator_col1.container(border=True) + cell1.metric("Densité Moyenne :", f"{rounded_mean_density} L/m²") + + # Calculate the mean of VOLUME_TOTAL and check its type + mean_volume_total = data['VOLUME_TOTAL'].mean() + if isinstance(mean_volume_total, float): + rounded_mean_volume_total = round(mean_volume_total, 2) + else: + rounded_mean_volume_total = 0 # Example default value + + cell2 = indicator_col2.container(border=True) + cell2.metric("Volume Moyen :", f"{rounded_mean_volume_total} Litres") + + # Calculate the mean of SURFACE and check its type + mean_surface = data['SURFACE'].mean() + if 
isinstance(mean_surface, float): + rounded_mean_surface = round(mean_surface, 2) + else: + rounded_mean_surface = 0 # Example default value + + cell3 = indicator_col3.container(border=True) + cell3.metric("Surface Moyenne :", f"{rounded_mean_surface:,} m²") return data @@ -409,49 +438,52 @@ def plot_density_map( region_geojson_path: str, ) -> folium.Map: - # Read geographic data from a GeoJSON file - gdf = gpd.read_file(region_geojson_path) - - # Calculate density - data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] - data_zds = data_zds[ - data_zds["DENSITE"] < 20 - ] # Remove rows with anomalously high density values - - # Round density values for display - data_zds["DENSITE"] = data_zds["DENSITE"].round(4) - # Round surface values for display - data_zds["SURFACE_ROND"] = data_zds["SURFACE"].round(2) - - # Initialize a map centered at the mean coordinates of locations - m = folium.Map( - location=[ - data_zds["LIEU_COORD_GPS_Y"].mean(), - data_zds["LIEU_COORD_GPS_X"].mean(), - ] - ) + if data_zds.empty: + st.write("Aucune donnée disponible pour la région sélectionnée.") + # Initialize a basic map without any data-specific layers + m = folium.Map(location=[46.6358, 2.5614], zoom_start=5) - # Loop over each row in the DataFrame to place markers - for index, row in data_zds.iterrows(): - popup_html = f""" -
-

Densité: {row['DENSITE']} L/m²

-

Volume total : {row['VOLUME_TOTAL']} litres

-

Surface total : {row['SURFACE_ROND']} m²

-

Type de milieu : {row['TYPE_MILIEU']}

-

Type de lieu : {row['TYPE_LIEU']}

-
- """ - lgd_txt = '{txt}' - color = couleur_milieu(row["TYPE_MILIEU"]) - folium.CircleMarker( - fg=folium.FeatureGroup(name=lgd_txt.format(txt=["TYPE_MILIEU"], col=color)), - location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], - radius=np.log(row["DENSITE"] + 1) * 15, - popup=folium.Popup(popup_html, max_width=300), - color=color, - fill=True, - ).add_to(m) + else: + # Calculate density + data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] + data_zds = data_zds[ + data_zds["DENSITE"] < 20 + ] # Remove rows with anomalously high density values + + # Round density values for display + data_zds["DENSITE"] = data_zds["DENSITE"].round(4) + # Round surface values for display + data_zds["SURFACE_ROND"] = data_zds["SURFACE"].round(2) + + # Initialize a map centered at the mean coordinates of locations + m = folium.Map( + location=[ + data_zds["LIEU_COORD_GPS_Y"].mean(), + data_zds["LIEU_COORD_GPS_X"].mean(), + ] + ) + + # Loop over each row in the DataFrame to place markers + for index, row in data_zds.iterrows(): + popup_html = f""" +
+

Densité: {row['DENSITE']} L/m²

+

Volume total : {row['VOLUME_TOTAL']} litres

+

Surface total : {row['SURFACE_ROND']} m²

+

Type de milieu : {row['TYPE_MILIEU']}

+

Type de lieu : {row['TYPE_LIEU']}

+
+ """ + lgd_txt = '{txt}' + color = couleur_milieu(row["TYPE_MILIEU"]) + folium.CircleMarker( + fg=folium.FeatureGroup(name=lgd_txt.format(txt=["TYPE_MILIEU"], col=color)), + location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], + radius=np.log(row["DENSITE"] + 1) * 15, + popup=folium.Popup(popup_html, max_width=300), + color=color, + fill=True, + ).add_to(m) folium_static(m) @@ -461,37 +493,41 @@ def plot_density_map( def density_table(data_zds: pd.DataFrame): - # Calculate density - data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] - # Remove rows with anomalously high density values - data_zds = data_zds[data_zds["DENSITE"] < 20] - - # Group by 'TYPE_MILIEU', calculate mean density, sort, and round the density - table_milieu = ( - data_zds.groupby("TYPE_MILIEU")["DENSITE"] - .mean() - .reset_index() - .sort_values(by="DENSITE", ascending=False) - ) - table_milieu["DENSITE"] = table_milieu["DENSITE"].round(4) - - st.dataframe( - table_milieu, - column_order=("TYPE_MILIEU", "DENSITE"), - hide_index=True, - width=800, - column_config={ - "TYPE_MILIEU": st.column_config.TextColumn( - "Milieu", - ), - "DENSITE": st.column_config.NumberColumn( - "Densité (L/m²)", - format="%f", - min_value=0, - max_value=max(table_milieu["DENSITE"]), - ), - }, - ) + if data_zds.empty: + st.write("Aucune donnée disponible pour la région sélectionnée.") + + else: + # Calculate density + data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] + # Remove rows with anomalously high density values + data_zds = data_zds[data_zds["DENSITE"] < 20] + + # Group by 'TYPE_MILIEU', calculate mean density, sort, and round the density + table_milieu = ( + data_zds.groupby("TYPE_MILIEU")["DENSITE"] + .mean() + .reset_index() + .sort_values(by="DENSITE", ascending=False) + ) + table_milieu["DENSITE"] = table_milieu["DENSITE"].round(4) + + st.dataframe( + table_milieu, + column_order=("TYPE_MILIEU", "DENSITE"), + hide_index=True, + width=800, + column_config={ + 
"TYPE_MILIEU": st.column_config.TextColumn( + "Milieu", + ), + "DENSITE": st.column_config.NumberColumn( + "Densité (L/m²)", + format="%f", + min_value=0, + max_value=max(table_milieu["DENSITE"]), + ), + }, + ) ################################ @@ -509,75 +545,78 @@ def plot_adopted_waste_spots( - data_zds: The waste dataframe - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by """ + if data_zds.empty: + st.write("Aucune donnée disponible pour la région sélectionnée.") - # 1/ Create the waste geodataframe # - # Create a GeoDataFrame for waste points - gdf = gpd.GeoDataFrame( - data_zds, - geometry=gpd.points_from_xy( - data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"] - ), - crs="EPSG:4326", - ) - - # Construct the query string - query_string = construct_query_string(**single_filter_dict) + else: + # 1/ Create the waste geodataframe # + # Create a GeoDataFrame for waste points + gdf = gpd.GeoDataFrame( + data_zds, + geometry=gpd.points_from_xy( + data_zds["LIEU_COORD_GPS_X"], data_zds["LIEU_COORD_GPS_Y"] + ), + crs="EPSG:4326", + ) - # Filter the geodataframe by region and by environment - gdf_filtered = gdf.query(query_string) + # Construct the query string + query_string = construct_query_string(**single_filter_dict) - # 2/ Create the regions geodataframe # - selected_admin_lvl = construct_admin_lvl_boundaries( - data_zds, NIVEAU_ADMIN, NIVEAUX_ADMIN_GEOJSON_PATH_DICT - ) + # Filter the geodataframe by region and by environment + gdf_filtered = gdf.query(query_string) - # 3/ Initialize folium map # - # Initialize a folium map, centered around the mean location of the waste points - map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] + # 2/ Create the regions geodataframe # + selected_admin_lvl = construct_admin_lvl_boundaries( + data_zds, NIVEAU_ADMIN, NIVEAUX_ADMIN_GEOJSON_PATH_DICT + ) - # Catch ValueError if the filtered geodataframe contain no rows - try: - m = 
folium.Map( - location=map_center, zoom_start=5 - ) # Adjust zoom_start as needed for the best initial view + # 3/ Initialize folium map # + # Initialize a folium map, centered around the mean location of the waste points + map_center = [gdf_filtered.geometry.y.mean(), gdf_filtered.geometry.x.mean()] + + # Catch ValueError if the filtered geodataframe contain no rows + try: + m = folium.Map( + location=map_center, zoom_start=5 + ) # Adjust zoom_start as needed for the best initial view + + # Return None if ValueError + except ValueError as e: + st.markdown( + "Il n'y a pas de hotspots pour les valeurs de filtres selectionnés !" + ) + st.markdown(f"{e}") + return + + # 4/ Add the markers # + # Use MarkerCluster to manage markers if dealing with a large number of points + marker_cluster = MarkerCluster().add_to(m) + + # Add each waste point as a marker on the folium map + for _, row in gdf_filtered.iterrows(): + # Define the marker color: green for adopted spots, red for others + marker_color = "darkgreen" if row["SPOT_A1S"] else "red" + # Define the icon: check-circle for adopted, info-sign for others + icon_type = "check-circle" if row["SPOT_A1S"] else "info-sign" + + folium.Marker( + location=[row.geometry.y, row.geometry.x], + popup=f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", + icon=folium.Icon(color=marker_color, icon=icon_type, prefix="fa"), + ).add_to(marker_cluster) + + # 5/ Add the region boundary # + # Add the region boundary to the map for context + folium.GeoJson( + selected_admin_lvl, + name="Region Boundary", + style_function=lambda feature: { + "weight": 2, + "fillOpacity": 0.1, + }, + ).add_to(m) - # Return None if ValueError - except ValueError as e: - st.markdown( - "Il n'y a pas de hotspots pour les valeurs de filtres selectionnés !" - ) - st.markdown(f"{e}") - return - - # 4/ Add the markers # - # Use MarkerCluster to manage markers if dealing with a large number of points - marker_cluster = MarkerCluster().add_to(m) - - # Add each waste point as a marker on the folium map - for _, row in gdf_filtered.iterrows(): - # Define the marker color: green for adopted spots, red for others - marker_color = "darkgreen" if row["SPOT_A1S"] else "red" - # Define the icon: check-circle for adopted, info-sign for others - icon_type = "check-circle" if row["SPOT_A1S"] else "info-sign" - - folium.Marker( - location=[row.geometry.y, row.geometry.x], - popup=f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", - icon=folium.Icon(color=marker_color, icon=icon_type, prefix="fa"), - ).add_to(marker_cluster) - - # 5/ Add the region boundary # - # Add the region boundary to the map for context - folium.GeoJson( - selected_admin_lvl, - name="Region Boundary", - style_function=lambda feature: { - "weight": 2, - "fillOpacity": 0.1, - }, - ).add_to(m) - - return m + return m ######################## From 439fb3fd9a8be7ac967c0d4f35111434e7b6ee3a Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 15:57:35 +0200 Subject: [PATCH 086/147] =?UTF-8?q?[tg]=20-=20inversion=20ordre=20ann?= =?UTF-8?q?=C3=A9es=20dans=20liste=20d=C3=A9roulante=20onglets1/3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index a4b9777..f95e0dc 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -63,7 +63,7 @@ def load_df_dict_corr_dechet_materiau(): [ "Matériaux :wood:", "Top Déchets :wastebasket:", - "Secteurs, marques et responsabilités élargies producteurs :womans_clothes:", + "Secteurs, marques et filières REP :womans_clothes:", ] ) @@ -302,7 +302,7 @@ def load_df_dict_corr_dechet_materiau(): selected_annee = st.selectbox( "Choisir une année:", - options=["Aucune sélection"] + list(df_other["ANNEE"].unique()), + options=["Aucune sélection"] + annee_liste, ) if selected_annee != "Aucune sélection": filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee].copy() @@ -705,8 +705,7 @@ def load_df_dict_corr_dechet_materiau(): # Étape 1: Création des filtres selected_annee_onglet_3 = st.selectbox( "Choisir une année:", - options=["Aucune sélection"] - + list(df_other["ANNEE"].sort_values().unique()), + options=["Aucune sélection"] + annee_liste, 
key="année_select", ) if selected_annee_onglet_3 != "Aucune sélection": From 6fd91e4e31e2ed55c6d075d430a2dc62f3edb761 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 17:00:36 +0200 Subject: [PATCH 087/147] [tg] - update legend in top dechets --- dashboards/app/pages/data.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index f95e0dc..63e41e4 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -598,7 +598,10 @@ def load_df_dict_corr_dechet_materiau(): df_top10_dechets, y="categorie", x="nb_dechet", - labels={"categorie": "Dechet", "nb_dechet": "Nombre total"}, + labels={ + "categorie": "Dechet", + "nb_dechet": "Nombre total de déchets (échelle logarithmique)", + }, title="Top 10 dechets ramassés (échelle logarithmique) ", text="nb_dechet", color="Materiau", @@ -607,7 +610,21 @@ def load_df_dict_corr_dechet_materiau(): ) fig5.update_layout(xaxis_type="log") # suppression de la légende des couleurs - fig5.update_layout(showlegend=False) + fig5.update_layout( + showlegend=True, + height=700, + uniformtext_minsize=8, + uniformtext_mode="hide", + yaxis_title=None, + # Position de la légende + legend=dict( + yanchor="bottom", + y=1.01, + xanchor="right", + x=0.95, + ), + ) + # Amélioration du visuel du graphique fig5.update_traces( # texttemplate="%{text:.2f}", @@ -615,9 +632,6 @@ def load_df_dict_corr_dechet_materiau(): textfont_color="white", textfont_size=20, ) - fig5.update_layout( - width=1400, height=900, uniformtext_minsize=8, uniformtext_mode="hide" - ) # Suppression de la colonne categorie del df_top10_dechets["Materiau"] @@ -954,7 +968,6 @@ def load_df_dict_corr_dechet_materiau(): fig_secteur.update_layout(xaxis_type="log") fig_secteur.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_secteur.update_layout( - width=800, height=500, 
uniformtext_mode="hide", showlegend=False, @@ -1007,7 +1020,6 @@ def load_df_dict_corr_dechet_materiau(): # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") fig_marque.update_layout( - width=800, height=500, uniformtext_minsize=8, uniformtext_mode="hide", From d0d4eab1a1a2524f64f72365b28912e8f99fa7c7 Mon Sep 17 00:00:00 2001 From: DridrM Date: Wed, 24 Apr 2024 17:11:19 +0200 Subject: [PATCH 088/147] Add the create_contributors_table function --- dashboards/app/pages/hotspots.py | 86 +++++++++++++++++++++++++------- 1 file changed, 69 insertions(+), 17 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 10ec744..caf19a9 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -350,7 +350,10 @@ def construct_admin_lvl_boundaries( # Fusion and correction data_correct = pd.merge(data_zds, correction, on="ID_RELEVE", how="left") data_correct = data_correct[data_correct["SURFACE_OK"] == "OUI"] -data_zds = data_correct[data_correct["VOLUME_TOTAL"] > 0] +data_zds_correct = data_correct[data_correct["VOLUME_TOTAL"] > 0] + +# Filter data_zds for data point which have volume > 0 +data_zds = data_zds[data_zds["VOLUME_TOTAL"] > 0] ################## # 2/ Hotspot tab # @@ -378,7 +381,7 @@ def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicato ] # Remove rows with anomalously high density values # Calculate the mean of DENSITE - mean_density = data['DENSITE'].mean() + mean_density = data["DENSITE"].mean() # Check if the result is a float and then apply round if isinstance(mean_density, float): @@ -392,7 +395,7 @@ def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicato cell1.metric("Densité Moyenne :", f"{rounded_mean_density} L/m²") # Calculate the mean of VOLUME_TOTAL and check its type - mean_volume_total = data['VOLUME_TOTAL'].mean() + mean_volume_total = data["VOLUME_TOTAL"].mean() if isinstance(mean_volume_total, 
float): rounded_mean_volume_total = round(mean_volume_total, 2) else: @@ -402,7 +405,7 @@ def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicato cell2.metric("Volume Moyen :", f"{rounded_mean_volume_total} Litres") # Calculate the mean of SURFACE and check its type - mean_surface = data['SURFACE'].mean() + mean_surface = data["SURFACE"].mean() if isinstance(mean_surface, float): rounded_mean_surface = round(mean_surface, 2) else: @@ -477,7 +480,9 @@ def plot_density_map( lgd_txt = '{txt}' color = couleur_milieu(row["TYPE_MILIEU"]) folium.CircleMarker( - fg=folium.FeatureGroup(name=lgd_txt.format(txt=["TYPE_MILIEU"], col=color)), + fg=folium.FeatureGroup( + name=lgd_txt.format(txt=["TYPE_MILIEU"], col=color) + ), location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], radius=np.log(row["DENSITE"] + 1) * 15, popup=folium.Popup(popup_html, max_width=300), @@ -537,7 +542,7 @@ def density_table(data_zds: pd.DataFrame): # Create the map of the adopted spots def plot_adopted_waste_spots( data_zds: pd.DataFrame, - single_filter_dict: dict, + multi_filter_dict: dict, ) -> folium.Map: """Show a folium innteractive map of adopted spots within a selected region, filtered by environments of deposit. 
@@ -560,10 +565,14 @@ def plot_adopted_waste_spots( ) # Construct the query string - query_string = construct_query_string(**single_filter_dict) + query_string = construct_query_string(**multi_filter_dict) # Filter the geodataframe by region and by environment - gdf_filtered = gdf.query(query_string) + try: + gdf_filtered = gdf.query(query_string) + + except: + st.write("Aucune donnée disponible pour les valeurs sélectionnées.") # 2/ Create the regions geodataframe # selected_admin_lvl = construct_admin_lvl_boundaries( @@ -599,9 +608,17 @@ def plot_adopted_waste_spots( # Define the icon: check-circle for adopted, info-sign for others icon_type = "check-circle" if row["SPOT_A1S"] else "info-sign" + # Create a folium iframe for the popup window + iframe = folium.IFrame( + f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres
Structure: {row['NOM_STRUCTURE']}" + ) + + # Create the popup window based on the iframe + popup = folium.Popup(iframe, min_width=200, max_width=300) + folium.Marker( location=[row.geometry.y, row.geometry.x], - popup=f"Zone: {row['NOM_ZONE']}
Date: {row['DATE']}
Volume: {row['VOLUME_TOTAL']} litres", + popup=popup, icon=folium.Icon(color=marker_color, icon=icon_type, prefix="fa"), ).add_to(marker_cluster) @@ -619,6 +636,41 @@ def plot_adopted_waste_spots( return m +def create_contributors_table(data_zds: pd.DataFrame, multi_filter_dict: dict) -> None: + """Create and show a streamlit table of the number of collects by contributors, + given a set of filters choosen by the user. + Arguments: + - data_zds: The waste dataframe + - filter_dict: dictionary mapping the name of the column in the waste df and the value you want to filter by""" + + # Handle case if there is no data + if data_zds.empty: + st.write("Aucune donnée disponible pour la région sélectionnée.") + + else: + # Construct the query string and filter the table of contributors given the user input + query_string = construct_query_string(**multi_filter_dict) + data_zds_filtered = data_zds.query(query_string) + + # Create the table (pandas serie) of contributors + contributors_table = ( + data_zds_filtered.groupby("NOM_STRUCTURE") + .count() + .loc[:, "ID_RELEVE"] + .sort_values(ascending=False) + ) + + # Create and show the table in streamlit + st.dataframe( + contributors_table, + width=800, + column_config={ + "NOM_STRUCTURE": st.column_config.TextColumn("Structure"), + "ID_RELEVE": st.column_config.NumberColumn("Nombre de ramassages"), + }, + ) + + ######################## # Dashboard Main Panel # ######################## @@ -637,7 +689,7 @@ def plot_adopted_waste_spots( # Call the function with the data and UI elements calculate_and_display_metrics( - data_zds, indicator_col1, indicator_col2, indicator_col3 + data_zds_correct, indicator_col1, indicator_col2, indicator_col3 ) st.markdown("---") @@ -646,11 +698,11 @@ def plot_adopted_waste_spots( with left_column: st.markdown("### Carte des Densités") - plot_density_map(data_zds, NIVEAUX_ADMIN_GEOJSON_PATH_DICT["Région"]) + plot_density_map(data_zds_correct, NIVEAUX_ADMIN_GEOJSON_PATH_DICT["Région"]) with 
right_column: st.markdown("### Tableau des Densités par Milieu") - density_table(data_zds) + density_table(data_zds_correct) with tab2: @@ -659,8 +711,6 @@ def plot_adopted_waste_spots( data_zds, filters_params=ADOPTED_SPOTS_FILTERS_PARAMS, base_key=tab2 ) - st.markdown("### Spots Adoptés") - # Construct the adopted waste spots map m = plot_adopted_waste_spots(data_zds, single_filter_dict_3) @@ -670,8 +720,10 @@ def plot_adopted_waste_spots( # Show the adopted spots map on the streamlit tab with left_column: if m: + st.markdown("### Carte des spots adoptés") folium_static(m) - -# if __name__ == "__main__": -# construct_admin_lvl_filter_list(data_zds, NIVEAU_ADMIN) + # Show the contributors table on the second column + with right_column: + st.markdown("### Tableau du nombre de ramassages par acteur") + create_contributors_table(data_zds, single_filter_dict_3) From e1d413f9270ab004215e0e97e088e37b297e8e11 Mon Sep 17 00:00:00 2001 From: DridrM Date: Wed, 24 Apr 2024 17:19:19 +0200 Subject: [PATCH 089/147] Correct zoom start level on the adopted waste spots map --- dashboards/app/pages/hotspots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index caf19a9..a630df4 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -586,7 +586,7 @@ def plot_adopted_waste_spots( # Catch ValueError if the filtered geodataframe contain no rows try: m = folium.Map( - location=map_center, zoom_start=5 + location=map_center ) # Adjust zoom_start as needed for the best initial view # Return None if ValueError From 2446d04b460efc048b28fadd54029e8f71698f24 Mon Sep 17 00:00:00 2001 From: Mendi33 Date: Wed, 24 Apr 2024 15:21:49 +0000 Subject: [PATCH 090/147] =?UTF-8?q?Last=20push=20before=20pull=20request?= =?UTF-8?q?=20Derni=C3=A8re=20correction=20demand=C3=A9es=20par=20T=C3=A9o?= =?UTF-8?q?=20faites.=20Ajoute=20dans=20requierements.txt=20du=20package?= 
=?UTF-8?q?=20Babel=20(pour=20affichage=20des=20noms=20des=20mois=20en=20F?= =?UTF-8?q?R)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/actions.py | 204 ++++++++++++++++++++++++-------- dashboards/app/requirements.txt | 1 + 2 files changed, 158 insertions(+), 47 deletions(-) diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index fea8ae1..fc3feb4 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -3,6 +3,7 @@ import plotly.express as px import streamlit as st import folium +from babel.dates import format_date, Locale # Page setting : wide layout st.set_page_config( @@ -60,19 +61,81 @@ def load_df_events_clean() -> pd.DataFrame: ] ) + # Locale du package Babel + locale = Locale("fr", "FR") + # Onglet 1 : Evènements with tab1: + # Convertit la colonne de date en datetime + df_other["DATE"] = pd.to_datetime(df_other["DATE"]) + + # Liste des années pour le filtre annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) # Filtre par année: - options = ["Toute la période"] + annee_liste + options = [ + f"Toute la période ({min(annee_liste)}-{max(annee_liste)})" + ] + annee_liste annee_choisie = st.selectbox("Choisissez l'année:", options, index=0) - if annee_choisie == "Toute la période": - df_other_filtre = df_other.copy() + # Selection d'une année dans le filtre -> affichage du second filtre MOIS + if isinstance(annee_choisie, int): + + # Dict de mois + mois_dict = { + "January": 1, + "February": 2, + "March": 3, + "April": 4, + "May": 5, + "June": 6, + "July": 7, + "August": 8, + "September": 9, + "October": 10, + "November": 11, + "December": 12, + } + + # Liste des mois uniques pour l'année sélectionnée + mois_liste = sorted( + df_other[df_other["ANNEE"] == annee_choisie]["DATE"] + .dt.strftime("%B") + .unique() + .tolist(), + key=lambda x: mois_dict[x], + ) - if annee_choisie != "Toute la période": - df_other_filtre = 
df_other[df_other["ANNEE"] == annee_choisie].copy() + # Plage des index pour le filtre par mois + range_mois_index = range(len(mois_liste) + 1) + + # Creation du select avec les mois de l'année choisie + mois_choisi_index = st.selectbox( + "Choisissez le mois:", + range_mois_index, + format_func=lambda x: f"Tous les mois de {annee_choisie}" + if x == 0 + else str.capitalize( + format_date( + datetime(2022, mois_dict[mois_liste[x - 1]], 1), + format="MMMM", + locale=locale, + ) + ), + index=0, + ) + + # Filtrer le DataFrame par année et mois sélectionnés + if mois_choisi_index != 0: # mois choisi + mois_choisi = mois_liste[mois_choisi_index - 1] + df_other_filtre = df_other[ + (df_other["ANNEE"] == annee_choisie) + & (df_other["DATE"].dt.month == mois_dict[mois_choisi]) + ].copy() + else: # que l'année choisie + df_other_filtre = df_other[df_other["ANNEE"] == annee_choisie].copy() + else: # pas d'année choisie + df_other_filtre = df_other.copy() # Copie des données pour transfo df_ramassages = df_other_filtre.copy() @@ -159,7 +222,9 @@ def load_df_events_clean() -> pd.DataFrame: df_carac = df_other_filtre.copy() df_carac_counts = df_carac["NIVEAU_CARAC"].value_counts().reset_index() + df_carac_counts = df_carac_counts.sort_values(by="NIVEAU_CARAC") df_carac_counts.columns = ["NIVEAU_CARAC", "counts"] + colors = px.colors.sequential.Blues[3:][::-1] fig1_actions = px.pie( df_carac_counts, @@ -167,17 +232,51 @@ def load_df_events_clean() -> pd.DataFrame: names="NIVEAU_CARAC", title="Répartition des niveaux de caractérisation", hole=0.5, + color_discrete_sequence=colors, + category_orders={"NIVEAU_CARAC": [0, 1, 2, 3, 4]}, + ) + fig1_actions.update_traces( + textposition="inside", texttemplate="%{label}
%{percent:.1%}" ) - fig1_actions.update_traces(textposition="inside", textinfo="percent+label") - # préparation du dataframe et figure releves types de milieux + # préparation du dataframe et figure releves types de déchets + df_type_dechet = df_other_filtre.copy() + df_type_dechet_counts = ( + df_type_dechet["TYPE_DECHET"].value_counts().reset_index() + ) + df_type_dechet_counts.columns = ["TYPE_DECHET", "counts"] + df_type_dechet_counts_sorted = df_type_dechet_counts.sort_values( + by="counts", ascending=False + ) + fig2_actions = px.bar( + df_type_dechet_counts_sorted, + y="counts", + x="TYPE_DECHET", + title="Nombre de relevés par types de déchets", + text="counts", + ) + fig2_actions.update_layout(xaxis_title="", yaxis_title="") + l3_col1, l3_col2 = st.columns(2) + cell4 = l3_col1.container(border=True) + cell5 = l3_col2.container(border=True) + + # Affichage donut + with cell4: + st.plotly_chart(fig1_actions, use_container_width=True) + # Affichage barplot + with cell5: + st.plotly_chart(fig2_actions, use_container_width=True) + + # Ligne 4 : 2 graphiques en ligne : bar chart types milieux et bar chart types de lieux + # préparation du dataframe et figure releves types de milieux df_milieux = df_other_filtre.copy() df_milieux_counts = df_milieux["TYPE_MILIEU"].value_counts().reset_index() df_milieux_counts.columns = ["TYPE_MILIEU", "counts"] df_milieux_counts_sorted = df_milieux_counts.sort_values( by="counts", ascending=True ) + # Retirer le texte entre parenthèses et les parenthèses elles-mêmes df_milieux_counts_sorted.TYPE_MILIEU = ( df_milieux_counts_sorted.TYPE_MILIEU.str.replace( @@ -185,7 +284,7 @@ def load_df_events_clean() -> pd.DataFrame: ).str.strip() ) - fig2_actions = px.bar( + fig3_actions = px.bar( df_milieux_counts_sorted, y="TYPE_MILIEU", x="counts", @@ -193,55 +292,37 @@ def load_df_events_clean() -> pd.DataFrame: text="counts", orientation="h", ) - fig2_actions.update_layout(xaxis_title="", yaxis_title="") - - l3_col1, l3_col2 = 
st.columns(2) - cell4 = l3_col1.container(border=True) - cell5 = l3_col2.container(border=True) - - # Affichage donut - with cell4: - st.plotly_chart(fig1_actions, use_container_width=True) - - # Affichage barplot - with cell5: - st.plotly_chart(fig2_actions, use_container_width=True) + fig3_actions.update_layout(xaxis_title="", yaxis_title="") - # Ligne 4 : 2 graphiques en ligne : bar chart types déchets et line chart volume + nb collectes par mois - # préparation du dataframe et figure releves types de déchets - df_type_dechet = df_other_filtre.copy() - df_type_dechet_counts = ( - df_type_dechet["TYPE_DECHET"].value_counts().reset_index() - ) - df_type_dechet_counts.columns = ["TYPE_DECHET", "counts"] - df_type_dechet_counts_sorted = df_type_dechet_counts.sort_values( + # préparation du dataframe et figure releves types de lieux 2 + df_type_lieu2 = df_other_filtre.copy() + df_type_lieu2_counts = df_type_lieu2["TYPE_LIEU2"].value_counts().reset_index() + df_type_lieu2_counts.columns = ["TYPE_LIEU2", "counts"] + df_type_lieu2_counts_sorted = df_type_lieu2_counts.sort_values( by="counts", ascending=False ) - fig3_actions = px.bar( - df_type_dechet_counts_sorted, - y="counts", - x="TYPE_DECHET", - title="Nombre de relevés par types de déchets", - text="counts", + + # Retirer le texte entre parenthèses et les parenthèses elles-mêmes + df_type_lieu2_counts_sorted.TYPE_LIEU2 = ( + df_type_lieu2_counts_sorted.TYPE_LIEU2.str.replace( + r"\([^()]*\)", "", regex=True + ).str.strip() ) - fig3_actions.update_layout(xaxis_title="", yaxis_title="") - # préparation du dataframe et figure volume + nb collectes volume + nb collectes par mois - df_mois = df_other_filtre.copy() - df_mois["DATE"] = pd.to_datetime(df_mois["DATE"]) - df_mois["MOIS"] = df_mois["DATE"].dt.month - df_mois_counts = df_mois["MOIS"].value_counts().reset_index() - df_mois_counts.columns = ["MOIS", "counts"] + fig4_actions = px.bar( - df_mois_counts, + df_type_lieu2_counts_sorted, y="counts", - x="MOIS", - 
title="Nombre de relevés par mois", + x="TYPE_LIEU2", + title="Nombre de relevés par types de lieu", text="counts", ) fig4_actions.update_layout(xaxis_title="", yaxis_title="") + fig4_actions.update_xaxes(tickangle=45) + l4_col1, l4_col2 = st.columns(2) cell6 = l4_col1.container(border=True) cell7 = l4_col2.container(border=True) + # Affichage barplot with cell6: st.plotly_chart(fig3_actions, use_container_width=True) @@ -249,6 +330,34 @@ def load_df_events_clean() -> pd.DataFrame: with cell7: st.plotly_chart(fig4_actions, use_container_width=True) + # préparation du dataframe et figure volume + nb collectes volume + nb collectes par mois + # Créer une liste ordonnée des noms de mois dans l'ordre souhaité + mois_ordre = [ + str.capitalize(format_date(dt, format="MMMM", locale=locale)) + for dt in pd.date_range(start="2022-01-01", end="2022-12-01", freq="MS") + ] + + df_mois = df_other_filtre.copy() + df_mois["DATE"] = pd.to_datetime(df_mois["DATE"]) + df_mois["MOIS"] = df_mois["DATE"].dt.month + df_mois_counts = df_mois["MOIS"].value_counts().reset_index() + df_mois_counts.columns = ["MOIS", "counts"] + + fig5_actions = px.bar( + df_mois_counts, + y="counts", + x="MOIS", + title="Nombre de relevés par mois", + text="counts", + ) + fig5_actions.update_layout(xaxis_title="", yaxis_title="") + # Utiliser la liste mois_ordre comme étiquettes sur l'axe x + fig5_actions.update_xaxes(tickvals=list(range(1, 13)), ticktext=mois_ordre) + + with st.container(border=True): + # Affichage barplot + st.plotly_chart(fig5_actions, use_container_width=True) + # onglet Evenements a venir with tab2: # Copie des données pour transfo @@ -300,7 +409,7 @@ def load_df_events_clean() -> pd.DataFrame:
- {row.DATE.strftime("%A %d %B %Y")} + {str.capitalize(format_date(row.DATE, format="full", locale=locale))}

@@ -342,7 +451,7 @@ def load_df_events_clean() -> pd.DataFrame: for idx, row in df_events_a_venir.iterrows(): with st.container(border=True): # Bloc contenant la date - date_block = f"

{row.DATE.day}
{row.DATE.strftime('%b')}
" + date_block = f"
{row.DATE.day}
{str.capitalize(locale.months['format']['wide'][row.DATE.month - 1])}
" # Bloc contenant le nom de l'événement event_block = ( f"
{row.NOM_EVENEMENT}
" @@ -357,5 +466,6 @@ def load_df_events_clean() -> pd.DataFrame: f"
{date_block}
{event_block}{type_structure_block}
", unsafe_allow_html=True, ) + else: st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 2314da2..7e9c3e3 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -8,3 +8,4 @@ plotly==5.19.0 streamlit-dynamic-filters==0.1.6 streamlit-authenticator==0.3.2 st-pages==0.4.5 +babel==2.11.0 From 6f4d1d036795bf8dc02b5b6632e842dff7e3dc29 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 18:01:31 +0200 Subject: [PATCH 091/147] [tg] - format des metrics --- dashboards/app/pages/data.py | 77 +++++++++++++++++------------------- 1 file changed, 37 insertions(+), 40 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 63e41e4..bca28e2 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -58,6 +58,17 @@ def load_df_dict_corr_dechet_materiau(): # Copier le df pour la partie filtrée par milieu/lieu/année df_other_metrics_raw = df_other.copy() + # Fonction pour améliorer l'affichage des nombres (milliers, millions, milliards) + def frenchify(x: int) -> str: + if x > 1e9: + y = x / 1e9 + return f"{y:,.2f} milliards".replace(".", ",") + if x > 1e6: + y = x / 1e6 + return f"{y:,.2f} millions".replace(".", ",") + else: + return f"{x:,.0f}".replace(",", " ") + # 3 Onglets : Matériaux, Top déchets, Filières et marques tab1, tab2, tab3 = st.tabs( [ @@ -157,19 +168,15 @@ def load_df_dict_corr_dechet_materiau(): # 1ère métrique : volume total de déchets collectés cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers - volume_total = f"{volume_total:,.0f}".replace(",", " ") - cell1.metric("Volume de déchets collectés", f"{volume_total} litres") + cell1.metric("Volume de déchets collectés", frenchify(volume_total) + " litres") # 2ème métrique : poids cell2 = l1_col2.container(border=True) - poids_total = 
f"{poids_total:,.0f}".replace(",", " ") - - cell2.metric("Poids total collecté", f"{poids_total} kg") + cell2.metric("Poids total collecté", frenchify(poids_total) + " kg") # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - nb_collectes = f"{nb_collectes_int:,.0f}".replace(",", " ") - cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") + cell3.metric("Nombre de collectes comptabilisées", frenchify(nb_collectes_int)) # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int == 1: @@ -434,14 +441,14 @@ def load_df_dict_corr_dechet_materiau(): poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() - volume_total_filtered = f"{volume_total_filtered:,.0f}".replace(",", " ") - cell6.metric("Volume de dechets collectés", f"{volume_total_filtered} litres") + cell6.metric( + "Volume de dechets collectés", frenchify(volume_total_filtered) + " litres" + ) - poids_total_filtered = f"{poids_total_filtered:,.0f}".replace(",", " ") - cell7.metric("Poids total collecté", f"{poids_total_filtered} kg") + cell7.metric("Poids total collecté", frenchify(poids_total_filtered) + " kg") - nombre_collectes_filtered = f"{len(df_filtered):,.0f}".replace(",", " ") - cell8.metric("Nombre de collectes", f"{nombre_collectes_filtered}") + nombre_collectes_filtered = len(df_filtered) + cell8.metric("Nombre de collectes", frenchify(nombre_collectes_filtered)) # Message d'avertissement nb de collectes en dessous de 5 if len(df_filtered) == 1: @@ -533,7 +540,6 @@ def load_df_dict_corr_dechet_materiau(): nb_total_dechets = df_top[(df_top["type_regroupement"] == "GROUPE")][ "nb_dechet" ].sum() - nb_total_dechets = f"{nb_total_dechets:,.0f}".replace(",", " ") # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -543,31 +549,30 @@ def load_df_dict_corr_dechet_materiau(): # Trick pour séparer les milliers # 
volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") - cell1.metric("Nombre de déchets catégorisés", f"{nb_total_dechets} déchets") + cell1.metric("Nombre de déchets catégorisés", frenchify(nb_total_dechets)) # 2ème métrique : équivalent volume catégorisé cell2 = l1_col2.container(border=True) - volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") cell2.metric( "Equivalent en volume ", - f"{volume_total_categorise} litres", + frenchify(volume_total_categorise) + " litres", ) # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de collectes comptabilisées", f"{nb_collectes}") + cell3.metric("Nombre de collectes comptabilisées", frenchify(nb_collectes_int)) # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int == 1: st.warning( "⚠️ Il n'y a qu' " - + str(nb_collectes) + + str(nb_collectes_int) + " collecte considérées dans les données présentées." ) elif nb_collectes_int <= 5: st.warning( "⚠️ Il n'y a que " - + str(nb_collectes) + + str(nb_collectes_int) + " collectes considérées dans les données présentées." 
) @@ -887,26 +892,22 @@ def load_df_dict_corr_dechet_materiau(): cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers - nb_dechet_secteur = f"{nb_dechet_secteur:,.0f}".replace(",", " ") cell1.metric( - "Nombre de déchets catégorisés par secteur", f"{nb_dechet_secteur} dechets" + "Nombre de déchets avec secteur identifié", frenchify(nb_dechet_secteur) ) # 2ème métrique : poids cell2 = l1_col2.container(border=True) - nb_secteurs = f"{nb_secteurs:,.0f}".replace(",", " ") - # poids_total = f"{poids_total:,.0f}".replace(",", " ") cell2.metric( - "Nombre de secteurs identifiés lors des collectes", - f"{nb_secteurs} secteurs", + "Nombre de secteurs identifiés dans les déchets collectés", + frenchify(nb_secteurs) + " secteurs", ) # 3ème métrique : nombre de collectes cell3 = l1_col3.container(border=True) - collectes_formatted = f"{collectes:,.0f}".replace(",", " ") cell3.metric( "Nombre de collectes comptabilisées", - f"{collectes_formatted} collectes", + frenchify(collectes) + " collectes", ) # Message d'avertissement nb de collectes en dessous de 5 @@ -989,18 +990,17 @@ def load_df_dict_corr_dechet_materiau(): cell4 = l2_col1.container(border=True) # 1er métrique : nombre de dechets categorises par marques - # Trick pour séparer les milliers - nb_dechet_marque = f"{nb_dechet_marque:,.0f}".replace(",", " ") + cell4.metric( - "Nombre de déchets catégorisés par marque", f"{nb_dechet_marque} dechets" + "Nombre de déchets dont la marque est identifiée", + frenchify(nb_dechet_marque) + " déchets", ) # 2ème métrique : nombre de marques identifiées lors des collectes cell5 = l2_col2.container(border=True) - nb_marques = f"{nb_marques:,.0f}".replace(",", " ") cell5.metric( - "Nombre de marques identifiés lors des collectes", - f"{nb_marques} marques", + "Nombre de marques identifiées lors des collectes", + frenchify(nb_marques) + " marques", ) fig_marque = px.bar( @@ -1030,7 +1030,7 @@ def load_df_dict_corr_dechet_materiau(): st.plotly_chart(fig_marque, 
use_container_width=True) with st.container(border=True): - st.write( + st.caption( "La Responsabilité Élargie du Producteur (REP) est une obligation qui impose aux entreprises de payer une contribution financière" + " pour la prise en charge de la gestion des déchets issus des produits qu’ils mettent sur le marché selon le principe pollueur-payeur." + " Pour ce faire, elles doivent contribuer financièrement à la collecte, du tri et au recyclage de ces produits, " @@ -1057,19 +1057,16 @@ def load_df_dict_corr_dechet_materiau(): # 1ère métrique : nombre de dechets catégorisés repartis par responsabilités cell6 = l3_col1.container(border=True) - # Trick pour séparer les milliers - nb_dechet_rep = f"{nb_dechet_rep:,.0f}".replace(",", " ") cell6.metric( "Nombre de déchets catégorisés par filière REP", - f"{nb_dechet_rep} dechets", + frenchify(nb_dechet_rep), ) # 2ème métrique : nombre de responsabilités cell7 = l3_col2.container(border=True) - nb_rep = f"{nb_rep:,.0f}".replace(",", " ") cell7.metric( "Nombre de filières REP identifiées lors des collectes", - f"{nb_rep} REP", + frenchify(nb_rep) + " filières", ) # Treemap REP From e8b595c44331fdba401166b8a642491af1307b78 Mon Sep 17 00:00:00 2001 From: Floriane Duccini Date: Wed, 24 Apr 2024 18:22:18 +0200 Subject: [PATCH 092/147] commiting structure file cleaned --- .gitignore | 4 +- .../data/structures_export_cleaned.csv | 561 ++++++++++++++++++ 2 files changed, 564 insertions(+), 1 deletion(-) create mode 100644 Exploration_visualisation/data/structures_export_cleaned.csv diff --git a/.gitignore b/.gitignore index 624c6e0..05ec177 100644 --- a/.gitignore +++ b/.gitignore @@ -162,4 +162,6 @@ dmypy.json cython_debug/ # Precommit hooks: ruff cache -.ruff_cache \ No newline at end of file +.ruff_cache + +etl/zds/.file_versions/* \ No newline at end of file diff --git a/Exploration_visualisation/data/structures_export_cleaned.csv b/Exploration_visualisation/data/structures_export_cleaned.csv new file mode 100644 index 
0000000..33cd554 --- /dev/null +++ b/Exploration_visualisation/data/structures_export_cleaned.csv @@ -0,0 +1,561 @@ +,ID_STRUCT,NOM_structure,SOUS_TYPE,TYPE,ADRESSE,CODE_POSTA,DATE_INSCR,ACTION_RAM,A1S_NB_SPO,CARACT_ACT,CARACT_NB_,CARACT_N_1,CARACT_N_2,CARACT_N_3,longitude,latitude,ID,COMMUNE,INSEE_COM,dep,reg,epci,nature_epc,libepci,departement,region +0,745,CareMor,,Organisation socioprofessionnelle,"Pleumeur-Bodou, Lannion, Côtes-d'Armor, Bretagne, France métropolitaine, 22560, France",22560.0,2024/02/27,1,0,0,0,0,0,0,-3.50734,48.769042,COMMUNE_0000000009735741,Pleumeur-Bodou,22198,22,53,200065928,CA,CA Lannion-Trégor Communauté,Côtes-d'Armor,Bretagne +1,744,Ecole Notre-Dame,École primaire,Établissement scolaire ou d'enseignement supérieur,"École primaire privée Notre-Dame, 78, Route de Sète, Agde, Béziers, Hérault, Occitanie, France métropolitaine, 34300, France",34300.0,2024/02/27,1,0,0,0,0,0,0,3.486079,43.308884,COMMUNE_0000000009761167,Agde,34003,34,76,243400819,CA,CA Hérault-Méditerranée,Hérault,Occitanie +2,743,J?aime ma mer,Association de protection de l'environnement,Association ou fédération,"Strada Suttana, Piève, Calvi, Haute-Corse, Corse, France métropolitaine, 20246, France",20246.0,2024/02/24,1,0,0,0,0,0,0,9.286789,42.580113,COMMUNE_0000000009762898,Piève,2B230,2B,94,200073120,CC,CC Nebbiu - Conca d'Oro,Haute-Corse,Corse +3,742,Conservatoire d'espaces naturels Corse,Association de protection de l'environnement,Association ou fédération,"Conservatoire d'espaces naturels de Corse, 871, Avenue de Borgo, Revinco, Borgo, Bastia, Haute-Corse, Corse, France métropolitaine, 20290, France",20290.0,2024/02/21,1,0,0,0,0,0,0,9.439797,42.565082,COMMUNE_0000000009762893,Borgo,2B042,2B,94,200036499,CC,CC de Marana-Golo,Haute-Corse,Corse +4,741,Région Sud,Région,Collectivité territoriale,"Place Jules Guesde, Saint-Lazare, 3e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13003, 
France",13003.0,2024/02/19,1,0,0,0,0,0,0,5.375299,43.302498,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +5,740,"MALINE (Mouvement d'Actions pour le Littoral, la Nature et l'Environnement)",Association de protection de l'environnement,Association ou fédération,"Rue du Sergent Lecêtre, Moulin des Gorces, La Tremblade, Rochefort, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17390, France",17390.0,2024/02/13,1,0,1,0,1,0,0,-1.150222,45.773018,COMMUNE_0000000009751935,La Tremblade,17452,17,75,241700640,CA,CA Royan Atlantique,Charente-Maritime,Nouvelle-Aquitaine +6,739,AS COLLEGE MONTAIGNE,Collège,Établissement scolaire ou d'enseignement supérieur,"Romagné - Renouveau, Conflans-Sainte-Honorine, Saint-Germain-en-Laye, Yvelines, 78700, Île-de-France, France métropolitaine, France",78700.0,2024/01/28,1,0,0,0,0,0,0,2.102224,49.003738,COMMUNE_0000000009735043,Conflans-Sainte-Honorine,78172,78,11,200059889,CU,CU Grand Paris Seine et Oise,Yvelines,Île-de-France +7,736,VIVRE AUX BORRELS,Autre,Association ou fédération,"Les Deuxièmes Borrels, Hyères, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83400, France",83400.0,2024/01/17,0,0,0,0,0,0,0,6.183353,43.165414,COMMUNE_0000000009761863,Hyères,83069,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +8,734,I Sbuleca Mare,Association Education environnement et Développement durable,Association ou fédération,"20260, France métropolitaine, France",20260.0,2024/01/11,1,1,1,0,0,0,1,7.191579,43.830338,COMMUNE_0000000009758684,Saint-Martin-du-Var,06126,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +9,733,Communauté de Communes du Pays des Paillons,Communauté de communes,Collectivité territoriale,"Route du Col de Nice, L'Escarène, Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06440, 
France",6440.0,2024/01/05,1,0,0,0,0,0,0,7.351287,43.828478,COMMUNE_0000000009758680,L'Escarène,06057,06,93,240600593,CC,CC du Pays des Paillons,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +10,730,Projet Heremoana,Association de protection de l'environnement,Association ou fédération,"Impasse du Calme, Gibou, La Renisière, La Gaconnière, Le Château-d'Oléron, Rochefort, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17480, France",17480.0,2023/12/14,0,0,0,0,0,0,0,-1.223085,45.891097,COMMUNE_0000000009751407,Le Château-d'Oléron,17093,17,75,241700624,CC,CC de l'Île d'Oléron,Charente-Maritime,Nouvelle-Aquitaine +11,729,Lycee de Pons,Lycée,Établissement scolaire ou d'enseignement supérieur,"Rue des Cordeliers, La Croix Chaillebourg, Pons, Jonzac, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17800, France",17800.0,2023/12/07,1,0,0,0,0,0,0,-0.555448,45.574349,COMMUNE_0000000009752948,Pons,17283,17,75,200041523,CC,CC de la Haute Saintonge,Charente-Maritime,Nouvelle-Aquitaine +12,728,COTA ONG,Association Education environnement et Développement durable,Association ou fédération,"13, Rue Parmentier, Centre Ville, Maisons-Alfort, Nogent-sur-Marne, Val-de-Marne, Île-de-France, France métropolitaine, 94700, France",94700.0,2023/11/28,0,0,0,0,0,0,0,2.430964,48.802829,COMMUNE_0000000009736528,Maisons-Alfort,94046,94,11,200054781,ME,Métropole du Grand Paris,Val-de-Marne,Île-de-France +13,726,Territoires sauvages,Autre,Association ou fédération,"Saintes-Maries-de-la-Mer, Arles, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13460, France",13460.0,2023/11/10,1,0,0,0,0,0,0,4.427573,43.455234,COMMUNE_0000000009760352,Saintes-Maries-de-la-Mer,13096,13,93,241300417,CA,CA d'Arles-Crau-Camargue-Montagnette,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +14,725,Collège Jules Massenet,Collège,Établissement scolaire ou d'enseignement supérieur,"Boulevard Massenet, Saint-Joseph, 14e Arrondissement, Marseille, Bouches-du-Rhône, 
Provence-Alpes-Côte d'Azur, France métropolitaine, 13014, France",13014.0,2023/11/08,0,0,0,0,0,0,0,5.381048,43.342808,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +15,723,UEHC Collonges Mont-d'Or,,Services de l'état et établissements publics,"11, Rue du Port, Collonges-au-Mont-d'Or, Lyon, Métropole de Lyon, Rhône, Auvergne-Rhône-Alpes, France métropolitaine, 69660, France",69660.0,2023/11/08,1,0,0,0,0,0,0,4.843365,45.815795,COMMUNE_0000000009751762,Collonges-au-Mont-d'Or,69063,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +16,722,Sauvegarde des forêts Varoises,Association de protection de l'environnement,Association ou fédération,"363, Chemin de l'Estanci, Port Auguier, Hyères, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83400, France",83400.0,2023/11/06,1,0,1,0,1,0,0,6.151693,43.033375,COMMUNE_0000000009761863,Hyères,83069,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +17,721,Brigade Verte Paris,Autre,Association ou fédération,"34, Rue de la Justice, Quartier Saint-Fargeau, Paris 20e Arrondissement, Paris, Île-de-France, France métropolitaine, 75020, France",75020.0,2023/11/04,1,0,0,0,0,0,0,2.403399,48.8716,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +18,720,Ma Garde Propr',Association de protection de l'environnement,Association ou fédération,"Avenue de la Paix, Le Thouar, La Garde, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83130, France",83130.0,2023/11/04,1,0,0,0,0,0,0,6.01099,43.128564,COMMUNE_0000000009761866,La Garde,83062,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +19,719,Centre de plongee du pradet,Club affilié FFESSM,Association ou fédération,"Le Pradet, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83220, 
France",83220.0,2023/11/03,1,0,0,0,0,0,0,6.03091,43.106308,COMMUNE_0000000009761865,Le Pradet,83098,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +20,718,Centre de découverte de la Baie du mont Saint Michel,Association Education environnement et Développement durable,Association ou fédération,"Le Vivier-sur-Mer, Saint-Malo, Ille-et-Vilaine, Bretagne, France métropolitaine, 35960, France",35960.0,2023/11/02,1,0,0,0,0,0,0,-1.771464,48.600415,COMMUNE_0000000009738089,Le Vivier-sur-Mer,35361,35,53,200070670,CC,CC du Pays de Dol et de la Baie du Mont Saint-Michel,Ille-et-Vilaine,Bretagne +21,716,Ville d'Auriol,Commune,Collectivité territoriale,"Hôtel de Ville, Place de la Libération, Quartier Raton, Auriol, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13390, France",13390.0,2023/10/24,1,0,1,0,0,1,0,5.640613,43.372641,COMMUNE_0000000009760871,Auriol,13007,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +22,715,UTOPIA,Association de protection de l'environnement,Association ou fédération,"38 bis, Rue Jean Bart, Kerlizou, Carantec, Morlaix, Finistère, Bretagne, France métropolitaine, 29660, France",29660.0,2023/10/21,0,0,0,0,0,0,0,-3.919121,48.66614,COMMUNE_0000000009736262,Carantec,29023,29,53,242900835,CA,CA Morlaix Communauté,Finistère,Bretagne +23,714,Communauté de communes QRGA,Communauté de communes,Collectivité territoriale,"Saint-Antonin-Noble-Val, Montauban, Tarn-et-Garonne, Occitanie, France métropolitaine, 82140, France",82140.0,2023/10/19,1,0,0,0,0,0,0,1.755415,44.15072,COMMUNE_0000000009758007,Saint-Antonin-Noble-Val,82155,82,76,248200107,CC,CC du Quercy Rouergue et des Gorges de l'Aveyron,Tarn-et-Garonne,Occitanie +24,713,CIQ de St-Marcel,Comité d'interêt de quartier,Association ou fédération,"Boulevard de Saint-Marcel, Saint-Marcel, 11e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 
13011, France",13011.0,2023/10/18,1,0,0,0,0,0,0,5.465234,43.287522,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +25,712,Acceuil de Loisirs Ville de Roquevaire,Commune,Collectivité territoriale,"Avenue des Alliés, Roquevaire, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13360, France",13360.0,2023/10/18,1,0,1,2,0,0,0,5.603742,43.349191,COMMUNE_0000000009760869,Roquevaire,13086,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +26,711,IRS de Provence,Autre,Association ou fédération,"Traverse des Fabres, Les Fabres, Les Accates, 11e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13011, France",13011.0,2023/10/18,1,0,0,0,0,0,0,5.495742,43.298642,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +27,709,ASPTT Lyon Plongée,Club affilié FFESSM,Association ou fédération,"Saint-Priest, Lyon, Métropole de Lyon, Rhône, Auvergne-Rhône-Alpes, France métropolitaine, 69800, France",69800.0,2023/10/12,1,0,0,0,0,0,0,4.919782,45.700479,COMMUNE_0000000009752266,Saint-Priest,69290,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +28,708,Dark Massilia,,Eco-artiste,"Chemin de Morgiou, Les Baumettes, 9e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13009, France",13009.0,2023/10/12,1,0,0,0,0,0,0,5.410722,43.235179,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +29,703,Les Comp'Act,Autre,Association ou fédération,"Route des Vorziers, Les Vorziers, Luzier, Sallanches, Bonneville, Haute-Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 74700, 
France",74700.0,2023/10/06,1,0,1,0,5,0,0,6.627336,45.975074,COMMUNE_0000000009750891,Sallanches,74256,74,84,200034882,CC,CC Pays du Mont-Blanc,Haute-Savoie,Auvergne-Rhône-Alpes +30,702,Association des Paluds,,Organisation socioprofessionnelle,"Avenue des Paluds, Zone industrielle des Paluds, Aubagne, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13400, France",13400.0,2023/10/05,1,1,1,1,0,0,0,5.599243,43.287937,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +31,700,Collège Les Aravis,Collège,Établissement scolaire ou d'enseignement supérieur,"Rue du Stade, Le Pessay, Thônes, Annecy, Haute-Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 74230, France",74230.0,2023/10/05,1,1,1,0,0,0,1,6.142892,45.927473,COMMUNE_0000000009751165,Annecy,74010,74,84,200066793,CA,CA du Grand Annecy,Haute-Savoie,Auvergne-Rhône-Alpes +32,699,Collège François Villon,Collège,Établissement scolaire ou d'enseignement supérieur,"Rue de Courencq, Saint-Marcel, 11e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13011, France",13011.0,2023/10/04,1,0,0,0,0,0,0,5.465234,43.287522,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +33,698,Mairie Roquefort La Bédoule,Commune,Collectivité territoriale,"Hôtel de Ville, Place de la Libération, Roquefort-la-Bédoule, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13830, France",13830.0,2023/10/04,1,0,1,3,0,0,0,5.59161,43.247833,COMMUNE_0000000009761403,Roquefort-la-Bédoule,13085,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +34,697,Côte de Grâce propre,Association de protection de l'environnement,Association ou fédération,"Honfleur, Lisieux, Calvados, Normandie, France métropolitaine, 14600, 
France",14600.0,2023/10/03,1,0,1,0,2,0,0,0.227195,49.423046,COMMUNE_0000000009731841,Honfleur,14333,14,28,200066827,CC,CC du Pays de Honfleur-Beuzeville,Calvados,Normandie +35,696,Domaine Vallée Verte,,Organisation socioprofessionnelle,"Rue de la Vallée Verte, Saint-Menet, 11e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13011, France",13011.0,2023/10/02,1,0,1,1,0,0,0,5.498847,43.287788,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +36,695,Ludovic Alussi,,Eco-artiste,"5, Rue Lacépède, Les Cinq-Avenues, 4e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13004, France",13004.0,2023/10/02,1,0,0,0,0,0,0,5.401299,43.30762,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +37,694,Penn Ar Kayak,Association sportive,Association ou fédération,"Plougonvelin, Brest, Finistère, Bretagne, France métropolitaine, 29217, France",29217.0,2023/10/01,1,0,1,1,1,0,0,-4.699373,48.363027,COMMUNE_0000000009738589,Plougonvelin,29190,29,53,242900074,CC,CC du Pays d'Iroise,Finistère,Bretagne +38,693,Pin Vert,École primaire,Établissement scolaire ou d'enseignement supérieur,"Aubagne, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, Metropolitan France, 13400, France",13400.0,2023/10/01,1,0,0,0,0,0,0,5.560736,43.289929,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +39,691,SIPOM de Revel,Communauté de communes,Collectivité territoriale,"Avenue Marie Curie, Zone industrielle de la Pomme, Saint-Pierre, Revel, Toulouse, Haute-Garonne, Occitanie, France métropolitaine, 31250, France",31250.0,2023/09/25,0,0,1,0,4,0,0,-1.259458,49.310111,COMMUNE_0000000009732295,Carentan-les-Marais,50099,50,28,200042729,CC,CC de la Baie du 
Cotentin,Manche,Normandie +40,690,CÔTE VERMEILLE GARDIENS MER ET NATURE,Association de protection de l'environnement,Association ou fédération,"Rue Alexandre Ducros, Cerbère, Céret, Pyrénées-Orientales, Occitanie, France métropolitaine, 66290, France",66290.0,2023/09/24,1,0,0,0,0,0,0,3.166431,42.442734,COMMUNE_0000000009763416,Cerbère,66048,66,76,200043602,CC,"CC des Albères, de la Côte Vermeille et de l'Illibéris",Pyrénées-Orientales,Occitanie +41,688,La grande COLLECTE,Association Education environnement et Développement durable,Association ou fédération,"La Rochelle, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17000, France",17000.0,2023/09/20,0,0,0,0,0,0,0,-1.12663,46.163994,COMMUNE_0000000009750154,La Rochelle,17300,17,75,241700434,CA,CA de La Rochelle,Charente-Maritime,Nouvelle-Aquitaine +42,687,Association Côte Fleurie Propre,Association de protection de l'environnement,Association ou fédération,"Rue de Troarn, Hameau Montigny, Gonneville-en-Auge, Lisieux, Calvados, Normandie, France métropolitaine, 14810, France",14810.0,2023/09/20,1,3,1,0,1,0,3,-0.193202,49.256486,COMMUNE_0000000009732695,Gonneville-en-Auge,14306,14,28,200065563,CC,CC Normandie-Cabourg-Pays d'Auge,Calvados,Normandie +43,686,La Girelle,Association de protection de l'environnement,Association ou fédération,"Bastia, Haute-Corse, Corse, France métropolitaine, France",20200.0,2023/09/17,1,0,1,0,1,0,0,7.183521,43.852277,COMMUNE_0000000009758461,Bonson,06021,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +44,685,Voiles au Vert,Association Education environnement et Développement durable,Association ou fédération,"13, Impasse du Chalet de la Marrière, Marrière, Boulevard des Poilus, Doulon - Bottière, Nantes, Loire-Atlantique, Pays de la Loire, France métropolitaine, 44300, France",44300.0,2023/09/16,1,0,0,0,0,0,0,-1.526837,47.234075,COMMUNE_0000000009746105,Nantes,44109,44,52,244400404,ME,Nantes Métropole,Loire-Atlantique,Pays de la 
Loire +45,684,VARNAT,Association Education environnement et Développement durable,Association ou fédération,"Avenue Ernest Reyer, Hyères, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83400, France",83400.0,2023/09/15,1,0,0,0,0,0,0,6.124006,43.115987,COMMUNE_0000000009761863,Hyères,83069,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +46,683,CIP GOLFE JUAN,Association sportive,Association ou fédération,"Quai Saint-Pierre, Golfe-Juan, Vallauris, Grasse, Maritime Alps, Provence-Alpes-Côte d'Azur, Metropolitan France, 06220, France",6220.0,2023/09/14,1,0,0,0,0,0,0,7.076291,43.572247,COMMUNE_0000000009759615,Vallauris,06155,06,93,240600585,CA,CA de Sophia Antipolis,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +47,682,Lion Environnement,Association de protection de l'environnement,Association ou fédération,"Hôtel de Ville (Villa Marcotte), 30, Rue du Général Gallieni, Lion-sur-Mer, Caen, Calvados, Normandie, France métropolitaine, 14780, France",14780.0,2023/09/14,1,0,1,0,1,0,0,-0.313426,49.301204,COMMUNE_0000000009732236,Lion-sur-Mer,14365,14,28,200065597,CU,CU Caen la Mer,Calvados,Normandie +48,679,La Poste PACA,,Services de l'état et établissements publics,"Place de l'Hôtel des Postes, Belsunce, 1er Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13001, France",13001.0,2023/09/08,1,0,0,0,0,0,0,5.377266,43.297895,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +49,678,Mairie de Banyuls-sur-mer,Commune,Collectivité territoriale,"Avenue de la République, Lotissement Mar Y Sol, Banyuls-sur-Mer, Céret, Pyrénées-Orientales, Occitanie, France métropolitaine, 66650, France",66650.0,2023/09/07,1,0,0,0,0,0,0,3.128921,42.482141,COMMUNE_0000000009763417,Banyuls-sur-Mer,66016,66,76,200043602,CC,"CC des Albères, de la Côte Vermeille et de l'Illibéris",Pyrénées-Orientales,Occitanie 
+50,677,NicePlogging - Agirrr,Association de protection de l'environnement,Association ou fédération,"Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, France",6000.0,2023/09/07,1,0,0,0,0,0,0,7.189165,43.696307,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +51,676,Les Week Ends Solidaires,Association Education environnement et Développement durable,Association ou fédération,"Boulevard de la Madeleine, La Madeleine, Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06100, France",6100.0,2023/09/06,1,0,0,0,0,0,0,7.224277,43.723247,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +52,675,Camille ROVERA,,Eco-artiste,"Gare Cagnes-sur-Mer, Avenue de la Gare, Cagnes-sur-Mer, Grasse, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06800, France",6800.0,2023/09/06,0,0,0,0,0,0,0,7.138889,43.658834,COMMUNE_0000000009759377,Cagnes-sur-Mer,06027,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +53,674,The SeaCleaners,Association de protection de l'environnement,Association ou fédération,"Association The SeaCleaners, 10, Rue de la Drisse, Kervinio, La Trinité-sur-Mer, Lorient, Morbihan, Brittany, Metropolitan France, 56470, France",56470.0,2023/09/01,1,0,0,0,0,0,0,-3.045793,47.595937,COMMUNE_0000000009744048,La Trinité-sur-Mer,56258,56,53,200043123,CC,CC Auray Quiberon Terre Atlantique,Morbihan,Bretagne +54,673,École élémentaire Julie-Victoire Daubié,École primaire,Établissement scolaire ou d'enseignement supérieur,"École élémentaire Julie-Victoire Daubié, 55, Avenue Aristide Briand, Les Pavillons-sous-Bois, Le Raincy, Seine-Saint-Denis, Île-de-France, France métropolitaine, 93320, France",93320.0,2023/08/30,0,0,0,0,0,0,0,2.498897,48.908664,COMMUNE_0000000009736020,Les 
Pavillons-sous-Bois,93057,93,11,200054781,ME,Métropole du Grand Paris,Seine-Saint-Denis,Île-de-France +55,672,Millenium-Project,Association Education environnement et Développement durable,Association ou fédération,"Rue de France, La Fare-les-Oliviers, Aix-en-Provence, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13580, France",13580.0,2023/08/20,1,0,0,0,0,0,0,5.178787,43.546667,COMMUNE_0000000009760119,La Fare-les-Oliviers,13037,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +56,669,Save la mermaid,Association de protection de l'environnement,Association ou fédération,"Avenue des Genêts, Les Estagnots, Le Penon, Seignosse, Dax, Landes, Nouvelle-Aquitaine, France métropolitaine, 40510, France",40510.0,2023/08/09,1,0,0,0,0,0,0,-1.429213,43.69031,COMMUNE_0000000009759613,Seignosse,40296,40,75,244000865,CC,CC Maremne Adour Côte Sud,Landes,Nouvelle-Aquitaine +57,668,DK Clean Up,Association de protection de l'environnement,Association ou fédération,"113, Rue Henri Ghesquière, Coudekerque-Centre, Coudekerque-Branche, Dunkerque, Nord, Hauts-de-France, France métropolitaine, 59210, France",59210.0,2023/08/08,1,0,1,0,1,0,0,2.387649,51.020204,COMMUNE_0000000009726963,Coudekerque-Branche,59155,59,32,245900428,CU,CU de Dunkerque,Nord,Hauts-de-France +58,667,URCPIE de Normandie,Association Education environnement et Développement durable,Association ou fédération,"Rue du Moulin au Roy, Université, Caen, Calvados, Normandie, France métropolitaine, 14000, France",14000.0,2023/07/26,0,0,0,0,0,0,0,-0.360756,49.194924,COMMUNE_0000000009733198,Caen,14118,14,28,200065597,CU,CU Caen la Mer,Calvados,Normandie +59,666,ReSeaclons,Association de protection de l'environnement,Association ou fédération,"Avenue du Palais de la Mer, Port Camargue, Le Grau-du-Roi, Nîmes, Gard, Occitanie, France métropolitaine, 30240, France",30240.0,2023/07/23,0,0,1,0,3,0,0,4.144951,43.527472,COMMUNE_0000000009760353,Le 
Grau-du-Roi,30133,30,76,243000650,CC,CC Terre de Camargue,Gard,Occitanie +60,662,EcoTerre Orvault,Association de protection de l'environnement,Association ou fédération,"37, Avenue de la Ferrière, Le Petit Chantilly, Le Bois Saint-Louis, Plaisance, Orvault, Nantes, Loire-Atlantique, Pays de la Loire, France métropolitaine, 44700, France",44700.0,2023/07/05,1,0,1,1,3,4,0,-1.587647,47.248348,COMMUNE_0000000009745819,Orvault,44114,44,52,244400404,ME,Nantes Métropole,Loire-Atlantique,Pays de la Loire +61,661,LIFE SeaBiL,Association de protection de l'environnement,Association ou fédération,"Ligue pour la Protection des Oiseaux, 8, Rue du Docteur Jacques Pujos, Corderie, Rochefort, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17305, France",17305.0,2023/07/05,1,0,1,1,0,0,0,-0.95944,45.940318,COMMUNE_0000000009751147,Rochefort,17299,17,75,200041762,CA,CA Rochefort Océan,Charente-Maritime,Nouvelle-Aquitaine +62,660,CPALB,Club affilié FFESSM,Association ou fédération,"Chemin de la Roselière, Mémard, Aix-les-Bains, Chambéry, Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 73100, France",73100.0,2023/06/30,1,0,0,0,0,0,0,5.890503,45.708698,COMMUNE_0000000009751962,Aix-les-Bains,73008,73,84,200068674,CA,CA Grand Lac,Savoie,Auvergne-Rhône-Alpes +63,659,GARDE (groupement associatif régional pour la détection de loisir et l?environnement),Association de protection de l'environnement,Association ou fédération,"Cambrai, Nord, Hauts-de-France, France métropolitaine, 59400, France",59400.0,2023/06/27,1,0,0,0,0,0,0,3.256164,50.159327,COMMUNE_0000000009728326,Cambrai,59122,59,32,200068500,CA,CA de Cambrai,Nord,Hauts-de-France +64,658,SEML Piau Engaly,Commune,Collectivité territoriale,"Engaly, Route de Piau-Engaly, Piau-Engaly, Aragnouet, Bagnères-de-Bigorre, Hautes-Pyrénées, Occitanie, France métropolitaine, 65170, France",65170.0,2023/06/26,1,0,0,0,0,0,0,0.086875,43.039313,COMMUNE_0000000009762448,Bagnères-de-Bigorre,65059,65,76,246500482,CC,CC de la 
Haute-Bigorre,Hautes-Pyrénées,Occitanie +65,657,ESTRAN Cité de la Mer,Association Education environnement et Développement durable,Association ou fédération,"ESTRAN Cité de la mer, 37, Rue de l'Asile Thomas, Dieppe, Seine-Maritime, Normandie, France métropolitaine, 76200, France",76200.0,2023/06/23,1,0,1,0,8,0,0,1.083209,49.930353,COMMUNE_0000000009729097,Dieppe,76217,76,28,247600786,CA,CA de la Région Dieppoise,Seine-Maritime,Normandie +66,656,CORIS PLONGEE,Association sportive,Association ou fédération,"Avenue des Vauclusiens, Martigues, Istres, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, Metropolitan France, 13117, France",13500.0,2023/06/18,1,0,0,0,0,0,0,5.027119,43.387656,COMMUNE_0000000009760884,Martigues,13056,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +67,655,MJC Vic-en-Bigorre,Association Education environnement et Développement durable,Association ou fédération,"Vic-en-Bigorre, Hautes-Pyrénées, Occitanie, France métropolitaine, 65500, France",65500.0,2023/06/13,1,0,0,0,0,0,0,0.061513,43.379062,COMMUNE_0000000009761032,Vic-en-Bigorre,65460,65,76,200072106,CC,CC Adour Madiran,Hautes-Pyrénées,Occitanie +68,654,DDTM 34,Département,Collectivité territoriale,"Quadro, 181, Place Ernest Granier, Port Marianne, Montpellier, Hérault, Occitanie, France métropolitaine, 34000, France",34006.0,2023/06/13,0,0,0,0,0,0,0,3.901579,43.601853,COMMUNE_0000000009759901,Montpellier,34172,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +69,653,Sapiens Evolution,Autre,Association ou fédération,"Avenue Balcon Sud, Font-Romeu, Font-Romeu-Odeillo-Via, Prades, Pyrénées-Orientales, Occitanie, France métropolitaine, 66120, France",66120.0,2023/06/11,1,0,0,0,0,0,0,2.035166,42.502948,COMMUNE_0000000009763395,Font-Romeu-Odeillo-Via,66124,66,76,246600464,CC,CC Pyrénées catalanes,Pyrénées-Orientales,Occitanie +70,652,GARDE (groupement associatif régional pour la détection de loisir et 
l?environnement),Association de protection de l'environnement,Association ou fédération,"Angers, Maine-et-Loire, Pays de la Loire, France métropolitaine, France",49100.0,2023/06/10,1,0,0,0,0,0,0,6.930379,44.251615,COMMUNE_0000000009757386,Saint-Étienne-de-Tinée,06120,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +71,651,Les Petits Débrouillards Grand Ouest,Association Education environnement et Développement durable,Association ou fédération,"11, Avenue Jean-Marie Bécel, Bécel, Ouest, Campen, Vannes, Morbihan, Bretagne, France métropolitaine, 56000, France",56000.0,2023/06/06,0,0,0,0,0,0,0,-2.772584,47.657372,COMMUNE_0000000009743709,Vannes,56260,56,53,200067932,CA,CA Golfe du Morbihan - Vannes Agglomération,Morbihan,Bretagne +72,649,Les Têtes de l'Art,Autre,Association ou fédération,"29, Rue Toussaint, Saint-Mauront, 3e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13003, France",13003.0,2023/06/02,1,0,0,0,0,0,0,5.383035,43.313964,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +73,646,Secours Populaire Français (13),Autre,Association ou fédération,"169, Chemin de Gibbes, Bon-Secours, 14e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13014, France",13014.0,2023/05/30,1,1,1,0,1,0,0,5.385315,43.317811,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +74,645,CleanRide,Association Education environnement et Développement durable,Association ou fédération,"11, Impasse des Templiers, Laure, Gignac-la-Nerthe, Istres, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13180, France",13180.0,2023/05/30,1,0,1,1,0,0,0,5.223795,43.387664,COMMUNE_0000000009760880,Gignac-la-Nerthe,13043,13,93,200054807,ME,Métropole 
d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +75,642,Risoul,,Services de l'état et établissements publics,"Office de Tourisme, Rue des Pourrières, Risoul 1850, Risoul, Briançon, Hautes-Alpes, Provence-Alpes-Côte d'Azur, France métropolitaine, 05600, France",5600.0,2023/05/26,0,0,0,0,0,0,0,4.559364,45.659865,COMMUNE_0000000009752282,Saint-Martin-en-Haut,69227,69,84,200066587,CC,CC des Monts du Lyonnais,Rhône,Auvergne-Rhône-Alpes +76,641,MJC Puylaurens,Autre,Association ou fédération,"Puylaurens, Castres, Tarn, Occitanie, France métropolitaine, 81700, France",81700.0,2023/05/24,1,0,1,0,1,0,0,2.027007,43.567915,COMMUNE_0000000009760185,Puylaurens,81219,81,76,248100158,CC,CC du Sor et de l'Agout,Tarn,Occitanie +77,639,Marseille Trail Club,Association sportive,Association ou fédération,"162, Boulevard Michelet, Saint-Giniez, 8e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13008, France",13008.0,2023/05/16,1,0,1,0,1,0,0,5.395539,43.264484,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +78,638,Centre Culturel Valloire,Autre,Association ou fédération,"Place Claude Pinoteau, La Bonne Eau, Le Praz, Valloire, Saint-Jean-de-Maurienne, Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 73450, France",73450.0,2023/05/15,1,0,1,1,0,0,0,4.943726,45.287939,COMMUNE_0000000009753991,Saint-Sorlin-en-Valloire,26330,26,84,200040491,CC,CC Porte de DrômArdèche,Drôme,Auvergne-Rhône-Alpes +79,637,Association culturelle et sportive du Lycée des Calanques,Association sportive,Association ou fédération,"89, Traverse Parangon, La Pointe-Rouge, 8e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13008, France",13008.0,2023/05/15,1,0,1,0,3,0,0,5.379556,43.2427,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole 
d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +80,636,ALB KERBI,,Organisation socioprofessionnelle,"10, Rue de l'Île Stibiden, Kerihuel, Arradon, Vannes, Morbihan, Bretagne, France métropolitaine, 56610, France",56610.0,2023/05/15,1,0,0,0,0,0,0,-2.817025,47.632244,COMMUNE_0000000009743711,Arradon,56003,56,53,200067932,CA,CA Golfe du Morbihan - Vannes Agglomération,Morbihan,Bretagne +81,635,CPIE Boucles de la Marne,Association de protection de l'environnement,Association ou fédération,"Rue de la Poste, Congis-sur-Thérouanne, Meaux, Seine-et-Marne, Île-de-France, France métropolitaine, 77440, France",77440.0,2023/05/09,0,0,1,1,0,0,0,2.974997,49.007151,COMMUNE_0000000009734987,Congis-sur-Thérouanne,77126,77,11,247700065,CC,CC du Pays de l'Ourcq,Seine-et-Marne,Île-de-France +82,634,Lycée le valentin,Lycée,Établissement scolaire ou d'enseignement supérieur,"Avenue de Lyon, Bourg-lès-Valence, Valence, Drôme, Auvergne-Rhône-Alpes, France métropolitaine, 26500, France",26500.0,2023/05/08,1,0,1,0,1,0,0,4.890924,44.953281,COMMUNE_0000000009755052,Bourg-lès-Valence,26058,26,84,200068781,CA,CA Valence Romans Agglo,Drôme,Auvergne-Rhône-Alpes +83,633,Asle Nonza,Association sportive,Association ou fédération,"Nonza, Bastia, Haute-Corse, Corse, France métropolitaine, 20217, France",20217.0,2023/05/03,1,0,0,0,0,0,0,9.348226,42.786425,COMMUNE_0000000009762487,Nonza,2B178,2B,94,200042943,CC,CC du Cap Corse,Haute-Corse,Corse +84,632,Collège Henri Laugier - Forcalquier,Collège,Établissement scolaire ou d'enseignement supérieur,"Avenue Crémieux, Forcalquier, Alpes-de-Haute-Provence, Provence-Alpes-Côte d'Azur, France métropolitaine, 04300, France",4300.0,2023/05/02,1,0,0,0,0,0,0,5.779519,43.965326,COMMUNE_0000000009758494,Forcalquier,04088,04,93,240400440,CC,CC Pays Forcalquier et Montagne de Lure,Alpes-de-Haute-Provence,Provence-Alpes-Côte d'Azur +85,621,NO Fishing,,Eco-artiste,"Traverse Rampal, Les Trois-Lucs, 12e Arrondissement, Marseille, Bouches-du-Rhône, 
Provence-Alpes-Côte d'Azur, France métropolitaine, 13012, France",13012.0,2023/04/30,1,0,0,0,0,0,0,5.470692,43.302327,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +86,628,Plaisanciers labellisés Wings of the Ocean,Association de protection de l'environnement,Association ou fédération,"26, Allée Léon Gambetta, Le Chapitre, 1er Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13001, France",13001.0,2023/04/22,1,0,1,1,26,8,0,5.382349,43.298875,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +87,624,France Nature Environnement Pays de la Loire,Association de protection de l'environnement,Association ou fédération,"76ter, Rue Lionnaise, Doutre-Saint-Jacques-Nazareth, Angers, Maine-et-Loire, Pays de la Loire, France métropolitaine, 49100, France",49100.0,2023/04/19,1,0,0,0,0,0,0,-0.564097,47.47779,COMMUNE_0000000009745202,Angers,49007,49,52,244900015,CU,CU Angers Loire Métropole,Maine-et-Loire,Pays de la Loire +88,632,Communauté de Communes du Briançonnais,Communauté de communes,Collectivité territoriale,"Rue Aspirant Jan, Briançon, Hautes-Alpes, Provence-Alpes-Côte d'Azur, France métropolitaine, 05100, France",5100.0,2023/04/18,0,0,0,0,0,0,0,6.644907,44.898362,COMMUNE_0000000009755018,Briançon,05023,05,93,240500439,CC,CC du Briançonnais,Hautes-Alpes,Provence-Alpes-Côte d'Azur +89,622,Club Alpin Français de Toulon,Association sportive,Association ou fédération,"Rue Paul Lendrin, Basse Ville, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83000, France",83000.0,2023/04/16,1,0,1,1,0,0,0,5.934082,43.123209,COMMUNE_0000000009761867,Toulon,83137,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +90,621,Swim For Change,Association de protection de l'environnement,Association ou fédération,"Rue 
Péron, Croissy-sur-Seine, Saint-Germain-en-Laye, Yvelines, Île-de-France, France métropolitaine, 78290, France",78290.0,2023/04/11,1,0,0,0,0,0,0,2.14827,48.876657,COMMUNE_0000000009736062,Croissy-sur-Seine,78190,78,11,200058519,CA,CA Saint Germain Boucles de Seine,Yvelines,Île-de-France +91,619,Cscn Section Plongée Nimes,Association sportive,Association ou fédération,"Rue Gaston Teissier, Capouchiné, Nîmes, Gard, Occitanie, France métropolitaine, 30947, France",30947.0,2023/04/07,1,0,0,0,0,0,0,4.356703,43.824288,COMMUNE_0000000009758988,Nîmes,30189,30,76,243000643,CA,CA de Nîmes Métropole,Gard,Occitanie +92,618,CSAR PSM,Association sportive,Association ou fédération,"Rue du Pont Neuf, Ruelle-sur-Touvre, Angoulême, Charente, Nouvelle-Aquitaine, France métropolitaine, 16600, France",16600.0,2023/04/06,0,0,0,0,0,0,0,0.2316,45.661069,COMMUNE_0000000009752398,Magnac-sur-Touvre,16199,16,75,200071827,CA,CA du Grand Angoulême,Charente,Nouvelle-Aquitaine +93,617,Des Pousses & Des Pierres,Autre,Association ou fédération,"Rue Saint-Guignol, Saint-Bonnet-du-Gard, Nîmes, Gard, Occitanie, France métropolitaine, 30210, France",30210.0,2023/04/02,1,0,1,0,2,0,0,4.542912,43.922099,COMMUNE_0000000009758744,Saint-Bonnet-du-Gard,30235,30,76,243000684,CC,CC du Pont du Gard,Gard,Occitanie +94,616,Les Peuples de la Mer,Association de protection de l'environnement,Association ou fédération,"Chemin Haut de la Mer, Leucate, Narbonne, Aude, Occitanie, France métropolitaine, 11370, France",11370.0,2023/03/29,1,0,0,0,0,0,0,3.04116,42.85681,COMMUNE_0000000009762646,Leucate,11202,11,76,241100593,CA,CA Le Grand Narbonne,Aude,Occitanie +95,614,ALE Clemenceau,École primaire,Établissement scolaire ou d'enseignement supérieur,"8, Rue Bories, Mèze, Montpellier, Hérault, Occitanie, France métropolitaine, 34140, France",34140.0,2023/03/27,1,0,1,0,1,0,0,3.606072,43.427945,COMMUNE_0000000009760632,Mèze,34157,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +96,613,SMITOMGA,,Services de 
l'état et établissements publics,"Passage des Ecoles, Guillestre, Briançon, Hautes-Alpes, Provence-Alpes-Côte d'Azur, France métropolitaine, 05600, France",5600.0,2023/03/23,0,0,0,0,0,0,0,6.650309,44.661507,COMMUNE_0000000009755901,Guillestre,05065,05,93,200067452,CC,CC du Guillestrois et du Queyras,Hautes-Alpes,Provence-Alpes-Côte d'Azur +97,612,Snorclean,Association de protection de l'environnement,Association ou fédération,"Rue Nationale, Domaine Les Coteaux, Trans-en-Provence, Draguignan, Var, Provence-Alpes-Côte d'Azur, Metropolitan France, 83720, France",83720.0,2023/03/23,1,0,0,0,0,0,0,6.493677,43.51479,COMMUNE_0000000009760099,Trans-en-Provence,83141,83,93,248300493,CA,CA Dracénie Provence Verdon Agglomération,Var,Provence-Alpes-Côte d'Azur +98,611,Meze nature,Association de protection de l'environnement,Association ou fédération,"Mèze, Montpellier, Hérault, Occitanie, France métropolitaine, 34140, France",34140.0,2023/03/23,1,0,1,0,1,0,0,3.609954,43.430282,COMMUNE_0000000009760632,Mèze,34157,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +99,610,Créons 2 mains,Autre,Association ou fédération,"Impasse Sylvestre, Saint-Just, 13e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13013, France",13013.0,2023/03/22,1,0,0,0,0,0,0,5.397668,43.317417,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +100,609,SMIAGE Maralpin,,Services de l'état et établissements publics,"Centre Administratif départemental - CADAM, La Provençale, Résidence Les Terrasses des Jacarandas, Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06700, France",6700.0,2023/03/22,1,0,0,0,0,0,0,7.280476,43.71723,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +101,606,Apnée g'Lisses,Association sportive,Association ou fédération,"Rue des 
Peupliers, Lisses, Arrondissement d'Évry, Essonne, Île-de-France, France métropolitaine, 91090, France",91090.0,2023/03/16,1,0,0,0,0,0,0,2.418183,48.608888,COMMUNE_0000000009738375,Lisses,91340,91,11,200059228,CA,CA Grand Paris Sud Seine Essonne Sénart,Essonne,Île-de-France +102,605,Breizh Diving Autonomie,Association sportive,Association ou fédération,"Kercado, Caudan, Lorient, Morbihan, Bretagne, France métropolitaine, 56850, France",56850.0,2023/03/15,1,0,0,0,0,0,0,-3.369169,47.753183,COMMUNE_0000000009743037,Lorient,56121,56,53,200042174,CA,CA Lorient Agglomération,Morbihan,Bretagne +103,604,CSAM Plongée,Club affilié FFESSM,Association ou fédération,"Chemin de la Batterie Basse, Cap Brun, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83100, France",83100.0,2023/03/14,1,0,0,0,0,0,0,7.183521,43.852277,COMMUNE_0000000009758461,Bonson,06021,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +104,603,Estibal,,Eco-artiste,"203, Quai de Valmy, Quartier de l'Hôpital Saint-Louis, Paris 10e Arrondissement, Paris, Île-de-France, France métropolitaine, 75010, France",75010.0,2023/03/14,1,0,0,0,0,0,0,2.368651,48.88201,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +105,602,FRONTIGNAN THAU HANDBALL,Association sportive,Association ou fédération,"Chemin de la Calade, Domaine de Selhac, La Peyrade, Frontignan, Montpellier, Hérault, Occitanie, France métropolitaine, 34110, France",34110.0,2023/03/14,1,0,1,0,1,0,0,3.739809,43.43763,COMMUNE_0000000009760628,Frontignan,34108,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +106,600,LINDEPERG,Club affilié FFESSM,Association ou fédération,"Gilly-sur-Isère, Albertville, Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 73200, France",73200.0,2023/03/13,0,0,0,0,0,0,0,6.361325,45.655558,COMMUNE_0000000009752209,Gilly-sur-Isère,73124,73,84,200068997,CA,CA Arlysère,Savoie,Auvergne-Rhône-Alpes 
+107,599,ScubaDiving Club Marne et Gondoire,Club affilié FFESSM,Association ou fédération,"Chemin du Clos-Roger, Le Clos-Roger, Chelles, Torcy, Seine-et-Marne, Île-de-France, France métropolitaine, 77500, France",77500.0,2023/03/12,1,0,0,0,0,0,0,2.578328,48.887977,COMMUNE_0000000009736011,Chelles,77108,77,11,200057958,CA,CA Paris - Vallée de la Marne,Seine-et-Marne,Île-de-France +108,596,AQUA-EVASION,Club affilié FFESSM,Association ou fédération,"4, Impasse du Quartier Saint-Louis, Laure, Gignac-la-Nerthe, Istres, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13180, France",13180.0,2023/03/10,1,0,0,0,0,0,0,5.234132,43.397074,COMMUNE_0000000009760880,Gignac-la-Nerthe,13043,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +109,595,Entre 2 Eaux - Plongée,Club affilié FFESSM,Association ou fédération,"Ruelle du Port, Talhouant, Île Sainte-Catherine, Locmiquélic, Lorient, Morbihan, Bretagne, France métropolitaine, 56570, France",56570.0,2023/03/10,1,0,0,0,0,0,0,-3.348748,47.723916,COMMUNE_0000000009743038,Locmiquélic,56118,56,53,200042174,CA,CA Lorient Agglomération,Morbihan,Bretagne +110,594,Collège Jean Jaurès,Collège,Établissement scolaire ou d'enseignement supérieur,"Peyrolles-en-Provence, Aix-en-Provence, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13860, France",13860.0,2023/03/04,1,0,0,0,0,0,0,5.545618,43.643258,COMMUNE_0000000009759640,Peyrolles-en-Provence,13074,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +111,593,Commune des Orres,Commune,Collectivité territoriale,"Rue Dessus Vière, Le Pont, Les Orres, Gap, Hautes-Alpes, Provence-Alpes-Côte d'Azur, France métropolitaine, 05200, France",5200.0,2023/03/03,1,0,0,0,0,0,0,6.552072,44.514057,COMMUNE_0000000009756571,Les Orres,05098,05,93,200067742,CC,CC Serre-Ponçon,Hautes-Alpes,Provence-Alpes-Côte d'Azur +112,592,la ferme des anes,Association de protection de 
l'environnement,Association ou fédération,"13, Route de la Colme, Brouckerque, Dunkerque, Nord, Hauts-de-France, France métropolitaine, 59630, France",59630.0,2023/03/03,1,0,0,0,0,0,0,2.278946,50.968653,COMMUNE_0000000009726989,Brouckerque,59110,59,32,200040954,CC,CC des Hauts de Flandre,Nord,Hauts-de-France +113,587,Citeo,Autre,Association ou fédération,"50, Boulevard Haussmann, Quartier de la Chaussée-d'Antin, Paris 9e Arrondissement, Paris, Île-de-France, France métropolitaine, 75009, France",75009.0,2023/02/23,1,0,0,0,0,0,0,2.332977,48.873945,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +114,586,Communauté de communes du Liancourtois - La Vallée Dorée,Communauté de communes,Collectivité territoriale,"Laigneville, Rue de Nogent, Laigneville, Clermont, Oise, Hauts-de-France, France métropolitaine, 60290, France",60290.0,2023/02/20,1,0,1,0,0,3,0,2.451862,49.290106,COMMUNE_0000000009732539,Laigneville,60342,60,32,246000129,CC,CC du Liancourtois,Oise,Hauts-de-France +115,584,CPIE Flandre Maritime,Association Education environnement et Développement durable,Association ou fédération,"Rue Jean Delvallez, Zuydcoote, Dunkerque, Nord, Hauts-de-France, France métropolitaine, 59123, France",59123.0,2023/02/16,1,1,1,2,3,0,0,2.488781,51.064094,COMMUNE_0000000009726955,Zuydcoote,59668,59,32,245900428,CU,CU de Dunkerque,Nord,Hauts-de-France +116,582,Ecole Elémentaire Frédéric Bazille,École primaire,Établissement scolaire ou d'enseignement supérieur,"Agde, Hérault, Occitanie, France métropolitaine, France",34300.0,2023/02/13,1,0,0,0,0,0,0,7.196613,43.77854,COMMUNE_0000000009758927,Carros,06033,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +117,581,Association Chateauloin Chemins Pluriels,Autre,Association ou fédération,"Avenue de Provence, Néoules, Brignoles, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83136, 
France",83136.0,2023/02/12,0,0,0,0,0,0,0,6.010701,43.309122,COMMUNE_0000000009761145,Néoules,83088,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +118,580,Commune d'Oraison,Commune,Collectivité territoriale,"Rue Paul Jean, Oraison, Forcalquier, Alpes-de-Haute-Provence, Provence-Alpes-Côte d'Azur, France métropolitaine, 04700, France",4700.0,2023/02/09,1,0,0,0,0,0,0,5.919268,43.916386,COMMUNE_0000000009758489,Oraison,04143,04,93,200034700,CA,CA Durance-Lubéron-Verdon Agglomération,Alpes-de-Haute-Provence,Provence-Alpes-Côte d'Azur +119,578,Club des baleinières de l'Ile d'Yeu,Association sportive,Association ou fédération,"L'Île-d'Yeu, Les Sables-d'Olonne, Vendée, Pays de la Loire, France métropolitaine, 85350, France",85350.0,2023/02/05,1,0,1,1,0,0,0,-1.755968,46.479919,COMMUNE_0000002200276650,Les Sables-d'Olonne,85194,85,52,200071165,CA,CA Les Sables d'Olonne Agglomération,Vendée,Pays de la Loire +120,576,Vrac'n Roll,,Organisation socioprofessionnelle,"17, Rue de Gerland, Gerland, Lyon 7e Arrondissement, Lyon, Métropole de Lyon, Rhône, Auvergne-Rhône-Alpes, France métropolitaine, 69007, France",69007.0,2023/02/02,0,0,0,0,0,0,0,4.840824,45.740807,COMMUNE_0000000009752008,Lyon,69123,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +121,575,Centre de loisir Mermoz,Association Education environnement et Développement durable,Association ou fédération,"Avenue Jean Mermoz, Rue Jean Mermoz, La Coueste, Aubagne, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13400, France",13400.0,2023/02/01,1,0,0,0,0,0,0,5.578269,43.280302,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +122,574,Collectif Orb et Monts Environnement,Association de protection de l'environnement,Association ou fédération,"Faugères, Béziers, Hérault, Occitanie, France métropolitaine, 34600, 
France",34600.0,2023/01/26,1,1,1,0,1,0,0,3.182079,43.560336,COMMUNE_0000000009760158,Faugères,34096,34,76,200071058,CC,CC Les Avant-Monts,Hérault,Occitanie +123,573,Charlie's Animal Guardians,Association de protection de l'environnement,Association ou fédération,"Rue Eugène Huret, Saint-Étienne-au-Mont, Boulogne-sur-Mer, Pas-de-Calais, Nordfrankreich, Metropolitanes Frankreich, 62360, Frankreich",62360.0,2023/01/21,1,0,0,0,0,0,0,1.637947,50.676074,COMMUNE_0000000009727315,Saint-Étienne-au-Mont,62746,62,32,246200729,CA,CA du Boulonnais,Pas-de-Calais,Hauts-de-France +124,571,SLB GALERIA,,Organisation socioprofessionnelle,"Galéria, Calvi, Haute-Corse, Corse, France métropolitaine, 20245, France",20245.0,2023/01/15,1,0,0,0,0,0,0,8.64418,42.40803,COMMUNE_0000000009763285,Galéria,2B121,2B,94,242020105,CC,CC de Calvi Balagne,Haute-Corse,Corse +125,570,SICTOM Ouest,,Services de l'état et établissements publics,"Le Houga, Route de Mont-de-Marsan, La Jalousie, Le Houga, Condom, Gers, Occitanie, France métropolitaine, 32460, France",32460.0,2023/01/06,1,0,1,1,0,0,0,-0.191726,43.778634,COMMUNE_0000000009759341,Le Houga,32155,32,76,243200409,CC,CC du Bas Armagnac,Gers,Occitanie +126,569,Base Nautique - Aviron Club Cajarcois,Association sportive,Association ou fédération,"Base nautique, Chemin des Mariniers, Jardins de la Ségalière, Les Ayroux, Cajarc, Figeac, Lot, Occitanie, France métropolitaine, 46160, France",46160.0,2023/01/06,1,0,1,1,0,0,0,-0.530841,47.966326,COMMUNE_0000000009742320,Saint-Denis-du-Maine,53212,53,52,245300223,CC,CC du Pays de Meslay-Grez,Mayenne,Pays de la Loire +127,567,E3D la Seyne sur Mer Lycée Beaussier et collèges Curie L'Herminier Wallon,Lycée,Établissement scolaire ou d'enseignement supérieur,"La Seyne-sur-Mer, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83500, France",83500.0,2023/01/05,0,2,1,0,4,0,0,5.978363,43.10691,COMMUNE_0000000009761866,La Garde,83062,83,93,248300543,ME,Métropole 
Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +128,565,Collectif Grek Nature et Patrimoine,Autre,Association ou fédération,"Groix, Lorient, Morbihan, Bretagne, France métropolitaine, 56590, France",56590.0,2023/01/04,1,0,1,0,10,0,0,-3.454603,47.638221,COMMUNE_0000000009743719,Groix,56069,56,53,200042174,CA,CA Lorient Agglomération,Morbihan,Bretagne +129,563,Mairie de Pointis-Inard,Commune,Collectivité territoriale,"Rue Saint-Jean de Pointis, Pointis-Inard, Saint-Gaudens, Haute-Garonne, Occitanie, France métropolitaine, 31800, France",31800.0,2023/01/03,1,0,0,0,0,0,0,0.802085,43.090193,COMMUNE_0000000009762180,Pointis-Inard,31427,31,76,200072643,CC,CC C?ur et Coteaux du Comminges,Haute-Garonne,Occitanie +130,561,Baptiste Leroy,,Eco-artiste,"Rue de Sainte-Anne, La Marne, Ouest, Vannes, Morbihan, Brittany, Metropolitan France, 56000, France",56000.0,2022/12/13,1,1,1,0,0,1,2,-2.777032,47.661218,COMMUNE_0000000009743709,Vannes,56260,56,53,200067932,CA,CA Golfe du Morbihan - Vannes Agglomération,Morbihan,Bretagne +131,560,Commune de Saint-Chamas,Commune,Collectivité territoriale,"Saint-Chamas, Istres, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13250, France",13250.0,2022/12/12,1,0,0,0,0,0,0,5.035989,43.546862,COMMUNE_0000000009760122,Saint-Chamas,13092,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +132,558,Crozon Littoral Environnement,Autre,Association ou fédération,"Crozon, Châteaulin, Finistère, Bretagne, France métropolitaine, 29160, France",29160.0,2022/12/06,1,0,1,0,0,2,0,-4.414771,48.248043,COMMUNE_0000000009739912,Crozon,29042,29,53,200066868,CC,CC Presqu'île de Crozon-Aulne maritime,Finistère,Bretagne +133,557,Gestes Propres,Association Education environnement et Développement durable,Association ou fédération,"28, Boulevard Poissonnière, Quartier du Faubourg-Montmartre, Paris 9e Arrondissement, Paris, Île-de-France, France métropolitaine, 
France",75009.0,2022/11/30,0,0,0,0,0,0,0,2.348719,48.876922,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +134,556,Syndicat Mixte du Bassin de Thau,,Services de l'état et établissements publics,"Quai des Moulins, Sète, Montpellier, Hérault, Occitanie, France métropolitaine, 34200, France",34200.0,2022/11/22,0,0,1,0,0,0,8,3.712803,43.413195,COMMUNE_0000000009760885,Sète,34301,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +135,555,Mairie de Fuveau,Commune,Collectivité territoriale,"Boulevard Émile Loubet, Fuveau, Aix-en-Provence, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13710, France",13710.0,2022/11/15,1,0,0,0,0,0,0,5.453317,43.532619,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +136,554,WeOcean,Association de protection de l'environnement,Association ou fédération,"Chemin de Roumagoua, La Fieloula, La Ciotat, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13600, France",13600.0,2022/11/15,1,0,0,0,0,0,0,5.609071,43.185601,COMMUNE_0000000009761644,La Ciotat,13028,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +137,553,"Charles Péguy, Lycée et Enseignement Supérieur",Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,"102, Rue Sylvabelle Robert de Vernejoul, Préfecture, 6e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13006, France",13006.0,2022/11/14,1,2,1,0,1,0,0,5.374772,43.288234,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +138,549,La P'Art Belle,Association Education environnement et Développement durable,Association ou fédération,"Vannes, Morbihan, Bretagne, France métropolitaine, 56000, 
France",56000.0,2022/11/08,0,0,0,0,0,0,0,-2.778191,47.650565,COMMUNE_0000000009743709,Vannes,56260,56,53,200067932,CA,CA Golfe du Morbihan - Vannes Agglomération,Morbihan,Bretagne +139,548,GreenMinded,Association de protection de l'environnement,Association ou fédération,"Le Chalard, Limoges, Haute-Vienne, Nouvelle-Aquitaine, France métropolitaine, 87500, France",87500.0,2022/11/08,1,0,0,0,0,0,0,1.134405,45.551806,COMMUNE_0000000009752905,Le Chalard,87031,87,75,248700189,CC,CC du Pays de Saint-Yrieix,Haute-Vienne,Nouvelle-Aquitaine +140,547,SURFRIDER VAR OUEST,Association de protection de l'environnement,Association ou fédération,"Font de Fillol, Avenue de la Mer, Font de Fillol, Gabois, Reynier, Six-Fours-les-Plages, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83140, France",83140.0,2022/11/06,1,0,1,0,1,1,0,5.827172,43.097979,COMMUNE_0000000009761868,Six-Fours-les-Plages,83129,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +141,546,Les givrés,,Eco-artiste,"Mer de Glace, Chemin de la Bagna, Les Praz de Chamonix, Chamonix-Mont-Blanc, Bonneville, Haute-Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 74400, France",74400.0,2022/11/04,0,0,0,0,0,0,0,6.875322,45.922636,COMMUNE_0000000009750652,Chamonix-Mont-Blanc,74056,74,84,200023372,CC,CC de la Vallée de Chamonix-Mont-Blanc,Haute-Savoie,Auvergne-Rhône-Alpes +142,543,Jardin Soleil,Autre,Association ou fédération,"Pignans, Brignoles, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83790, France",83790.0,2022/11/01,1,0,1,0,1,0,0,6.231641,43.294961,COMMUNE_0000000009761141,Pignans,83092,83,93,248300550,CC,CC C?ur du Var,Var,Provence-Alpes-Côte d'Azur +143,542,AM,Autre,Association ou fédération,"L'Estaque, 16e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13000, France",13000.0,2022/10/19,1,0,1,0,1,0,0,5.313404,43.360846,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole 
d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +144,541,Maternelle Jean Moulin,École maternelle,Établissement scolaire ou d'enseignement supérieur,"24 bis, Avenue du Maréchal Juin, Castelnaudary, Carcassonne, Aude, Occitanie, France métropolitaine, 11400, France",11400.0,2022/10/18,1,0,0,0,0,0,0,1.943165,43.317199,COMMUNE_0000000009761214,Castelnaudary,11076,11,76,200035855,CC,CC Castelnaudary Lauragais Audois,Aude,Occitanie +145,540,Lycée Périer,Lycée,Établissement scolaire ou d'enseignement supérieur,"270, Rue Paradis, Castellane, 6e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13006, France",13008.0,2022/10/17,1,0,1,0,0,1,0,5.378691,43.281158,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +146,539,Association Marseille Aubagne de Pêche,Association sportive,Association ou fédération,"Rue des Crottes, Saint-Marcel, 11e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13011, France",13011.0,2022/10/17,1,0,0,0,0,0,0,5.465234,43.287522,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +147,538,Commune de Puget-Ville,Commune,Collectivité territoriale,"Mairie de Puget-Ville, 368, Rue de la Libération, Puget-Ville, Brignoles, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83390, France",83390.0,2022/10/15,1,0,0,0,0,0,0,6.135969,43.289154,COMMUNE_0000000009761142,Puget-Ville,83100,83,93,248300550,CC,CC C?ur du Var,Var,Provence-Alpes-Côte d'Azur +148,537,Le Tremplin,,Organisation socioprofessionnelle,"7, Rue Commandant Drogou, Kerbernier, Bellevue, Brest, Finistère, Bretagne, France métropolitaine, 29200, France",29200.0,2022/10/14,1,0,0,0,0,0,0,-4.490012,48.404247,COMMUNE_0000000009738584,Brest,29019,29,53,242900314,ME,Brest Métropole,Finistère,Bretagne 
+149,536,Commune de Saint Zacharie,Commune,Collectivité territoriale,"Saint-Zacharie, Brignoles, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83640, France",83640.0,2022/10/14,1,0,1,1,0,0,0,5.714767,43.385832,COMMUNE_0000000009760868,Saint-Zacharie,83120,83,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Var,Provence-Alpes-Côte d'Azur +150,535,Antide Boyer,École primaire,Établissement scolaire ou d'enseignement supérieur,"Avenue Antide Boyer, Aubagne, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13400, France",13400.0,2022/10/13,1,0,1,2,0,0,0,5.56536,43.294644,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +151,534,Plaisirplonger,Club affilié FFESSM,Association ou fédération,"Marina Baie des Anges, Villeneuve-Loubet, Grasse, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06270, France",6270.0,2022/10/11,0,0,0,0,0,0,0,7.137412,43.635457,COMMUNE_0000000009759378,Villeneuve-Loubet,06161,06,93,240600585,CA,CA de Sophia Antipolis,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +152,533,Collège Gyptis,Collège,Établissement scolaire ou d'enseignement supérieur,"Place Didier Garnier, Le Cabot, 9e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13009, France",13009.0,2022/10/10,1,0,1,2,0,0,0,5.41749,43.258445,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +153,532,ADEME,,Services de l'état et établissements publics,"155bis, Avenue Pierre Brossolette, Quartier Plein Sud, Montrouge, Antony, Hauts-de-Seine, Île-de-France, France métropolitaine, 92120, France",92120.0,2022/10/07,0,0,0,0,0,0,0,2.305116,48.815679,COMMUNE_0000000009736537,Montrouge,92049,92,11,200054781,ME,Métropole du Grand Paris,Hauts-de-Seine,Île-de-France +154,531,Collège Grande Bastide,Collège,Établissement scolaire ou 
d'enseignement supérieur,"Chemin Joseph Aiguier, Mazargues, 9e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13009, France",13009.0,2022/10/04,1,0,0,0,0,0,0,5.403256,43.250611,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +155,529,Team Oxygen,Association sportive,Association ou fédération,"Saint-Mitre-les-Remparts, Istres, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13920, France",13920.0,2022/09/30,1,0,1,15,0,0,0,5.000956,43.466275,COMMUNE_0000000009760623,Saint-Mitre-les-Remparts,13098,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +156,527,Collège Louise Michel,Collège,Établissement scolaire ou d'enseignement supérieur,"Rue Alfred Curtel, La Capelette, 10e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13010, France",13010.0,2022/09/29,0,0,1,1,0,0,0,5.407345,43.280818,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +157,525,Ecole Jean Rostand,École primaire,Établissement scolaire ou d'enseignement supérieur,"Impasse des Écoles, Quartier Raton, Auriol, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13390, France",13390.0,2022/09/29,1,0,1,1,0,0,0,5.640613,43.372641,COMMUNE_0000000009760871,Auriol,13007,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +158,524,Communauté de communes Conflent Canigó,Communauté de communes,Collectivité territoriale,"Route de Ria, La Riberette, Prades, Pyrénées-Orientales, Occitanie, France métropolitaine, 66500, France",66500.0,2022/09/28,1,0,1,0,1,0,0,2.415482,42.612635,COMMUNE_0000000009763314,Prades,66149,66,76,200049211,CC,CC Conflent-Canigó,Pyrénées-Orientales,Occitanie +159,523,Commune de 
Venelles,Commune,Collectivité territoriale,"Place Marius Trucy, Venelles, Aix-en-Provence, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13770, France",13770.0,2022/09/26,1,0,0,0,0,0,0,5.41803,43.52202,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +160,522,Commune de la Destrousse,Commune,Collectivité territoriale,"La Destrousse, Avenue du Mistral, La Destrousse, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13112, France",13112.0,2022/09/26,1,0,0,0,0,0,0,5.605043,43.376085,COMMUNE_0000000009760870,La Destrousse,13031,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +161,520,QNSCNT - annexe Cahors,Autre,Association ou fédération,"Cahors, Lot, Occitanie, France métropolitaine, 46000, France",46000.0,2022/09/23,1,0,1,2,1,0,0,1.435524,44.447159,COMMUNE_0000000009757321,Cahors,46042,46,76,200023737,CA,CA du Grand Cahors,Lot,Occitanie +162,519,la bellenergie,,Organisation socioprofessionnelle,"Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, France",83800.0,2022/09/20,1,0,1,1,0,0,0,7.196051,43.667725,COMMUNE_0000000009759143,Saint-Laurent-du-Var,06123,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +163,517,FOREST CLEANING,Association de protection de l'environnement,Association ou fédération,"6, Résidence du Bel Ébat, La Celle-Saint-Cloud, Versailles, Yvelines, Île-de-France, France métropolitaine, 78170, France",78170.0,2022/09/12,1,0,1,1,0,0,0,2.132363,48.836029,COMMUNE_0000000009736561,La Celle-Saint-Cloud,78126,78,11,247800584,CA,CA Versailles Grand Parc (CAVGP),Yvelines,Île-de-France +164,516,France Nature Environnement 82,Association de protection de l'environnement,Association ou fédération,"Ça Monte En Bas, 1, Rue des Oules, Jardins familaux de Ça Monte En Bas, Pouty, Montauban, Tarn-et-Garonne, 
Occitanie, France métropolitaine, 82000, France",82000.0,2022/08/31,1,0,1,0,2,0,0,1.325875,44.009664,COMMUNE_0000000009758402,Montauban,82121,82,76,248200099,CA,CA Grand Montauban,Tarn-et-Garonne,Occitanie +165,514,Hopika,Association Education environnement et Développement durable,Association ou fédération,"Rue François Curt, Cluses, Bonneville, Haute-Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 74300, France",74300.0,2022/08/29,1,0,0,0,0,0,0,6.580992,46.059182,COMMUNE_0000000009750416,Cluses,74081,74,84,200033116,CC,CC Cluses-Arve et Montagnes,Haute-Savoie,Auvergne-Rhône-Alpes +166,512,IBKM,Association Education environnement et Développement durable,Association ou fédération,"17, Rue Henry Monnier, Quartier Saint-Georges, Paris 9e Arrondissement, Paris, Île-de-France, France métropolitaine, 75009, France",75009.0,2022/08/19,1,0,1,0,2,0,0,2.33727,48.880077,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +167,511,CORSICA CLEAN NATURE,Association de protection de l'environnement,Association ou fédération,"Aspretto, Ajaccio, Corse-du-Sud, Corse, France métropolitaine, 20090, France",20090.0,2022/08/17,1,0,0,0,0,0,0,8.760047,41.926793,COMMUNE_0000000009763542,Ajaccio,2A004,2A,94,242010056,CA,CA du Pays Ajaccien,Corse-du-Sud,Corse +168,509,Centre Hospitalier Universitaire Grenoble Alpes,,Services de l'état et établissements publics,"Boulevard de la Chantourne, La Tronche, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38700, France",38700.0,2022/08/08,1,0,1,1,0,0,0,5.750745,45.196799,COMMUNE_0000000009753962,La Tronche,38516,38,84,200040715,ME,Grenoble-Alpes-Métropole,Isère,Auvergne-Rhône-Alpes +169,508,Les Angles,Commune,Collectivité territoriale,"Avenue de Mont-Louis, Les Angles, Prades, Pyrénées-Orientales, Occitanie, France métropolitaine, 66210, France",66210.0,2022/08/03,1,0,1,0,1,0,0,2.072497,42.567635,COMMUNE_0000000009763329,Les Angles,66004,66,76,246600464,CC,CC Pyrénées 
catalanes,Pyrénées-Orientales,Occitanie +170,506,Ax 3 Domaines,,Organisation socioprofessionnelle,"Boulevard de la Griole Haut, Ax-les-Thermes, Foix, Ariège, Occitanie, France métropolitaine, 09110, France",9110.0,2022/07/25,1,0,1,0,1,0,0,1.812857,42.700427,COMMUNE_0000000009763169,Ax-les-Thermes,09032,09,76,200066363,CC,CC de la Haute Ariège,Ariège,Occitanie +171,505,Syndicat Mixte du Bassin de l'Isle,Autre,Association ou fédération,"Route des Grands Champs, Saint-Laurent-des-Hommes, Périgueux, Dordogne, Nouvelle-Aquitaine, France métropolitaine, 24400, France",24400.0,2022/07/25,1,0,1,0,0,1,0,0.264456,45.020036,COMMUNE_0000000009754976,Saint-Laurent-des-Hommes,24436,24,75,200069094,CC,CC Isle et Crempse en Périgord,Dordogne,Nouvelle-Aquitaine +172,504,EPSA LA PIERRE SAINT MARTIN,,Services de l'état et établissements publics,"Arette, Oloron-Sainte-Marie, Pyrénées-Atlantiques, Nouvelle-Aquitaine, France métropolitaine, 64570, France",64570.0,2022/07/13,1,0,1,0,1,0,0,-0.71938,43.088986,COMMUNE_0000000009762480,Arette,64040,64,75,200067262,CC,CC du Haut Béarn,Pyrénées-Atlantiques,Nouvelle-Aquitaine +173,503,EPSA GOURETTE,Autre,Association ou fédération,"Gourette, Eaux-Bonnes, Oloron-Sainte-Marie, Pyrénées-Atlantiques, Nouvelle-Aquitaine, France métropolitaine, 64440, France",64440.0,2022/07/10,1,0,0,0,0,0,0,-0.369814,42.968491,COMMUNE_0000000009762634,Eaux-Bonnes,64204,64,75,246400337,CC,CC de la Vallée d'Ossau,Pyrénées-Atlantiques,Nouvelle-Aquitaine +174,501,EPAGA,,Services de l'état et établissements publics,"Penmez, Châteaulin, Finistère, Bretagne, France métropolitaine, 29150, France",29150.0,2022/07/04,0,0,0,0,0,0,0,-4.092217,48.206316,COMMUNE_0000000009740313,Châteaulin,29026,29,53,200067247,CC,CC Pleyben-Châteaulin-Porzay,Finistère,Bretagne +175,500,Société de Remontées Mécaniques de la station de Val Thorens,,Organisation socioprofessionnelle,"Val-Thorens, Grande Rue, Balcon, Saint-Martin-de-Belleville, Val Thorens, Albertville, Savoie, Auvergne-Rhône-Alpes, 
France métropolitaine, 73440, France",73440.0,2022/07/04,1,0,1,0,2,0,0,6.559921,45.30194,COMMUNE_0000000009753467,Les Belleville,73257,73,84,200023299,CC,CC C?ur de Tarentaise,Savoie,Auvergne-Rhône-Alpes +176,499,Communauté de communes de la Haute Saintonge,Communauté de communes,Collectivité territoriale,"Rue Taillefer, La Perauderie, Bourg Nouveau, Jonzac, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17500, France",17500.0,2022/07/04,1,0,0,0,0,0,0,-0.426245,45.447031,COMMUNE_0000000009753451,Jonzac,17197,17,75,200041523,CC,CC de la Haute Saintonge,Charente-Maritime,Nouvelle-Aquitaine +177,498,CPIE Pays de Morlaix,Association Education environnement et Développement durable,Association ou fédération,"Lanmeur, Morlaix, Finistère, Bretagne, France métropolitaine, 29620, France",29620.0,2022/06/29,1,0,0,0,0,0,0,-3.717188,48.645337,COMMUNE_0000000009736754,Lanmeur,29113,29,53,242900835,CA,CA Morlaix Communauté,Finistère,Bretagne +178,497,Les Têtes de l'Art / Banlastic Egypt,Autre,Association ou fédération,"29, Rue Toussaint, Saint-Mauront, 3e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13003, France",13003.0,2022/06/29,0,0,1,0,1,0,0,5.383035,43.313964,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +179,496,"Maison Pour Tous/Centre social Fissiaux, Association IFAC",Autre,Association ou fédération,"Marseille-Blancarde, Avenue du Maréchal Foch, La Blancarde, 4e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13004, France",13004.0,2022/06/29,1,1,0,0,0,0,0,5.401283,43.299315,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +180,495,Mairie Le Teil,Commune,Collectivité territoriale,"Rue de l'Hôtel de Ville, Le Teil, Privas, Ardèche, Auvergne-Rhône-Alpes, France métropolitaine, 
07400, France",7400.0,2022/06/29,0,0,0,0,0,0,0,4.68353,44.550444,COMMUNE_0000000009756629,Le Teil,07319,07,84,200071405,CC,CC Ardèche Rhône Coiron,Ardèche,Auvergne-Rhône-Alpes +181,494,Mairie de La Seyne-sur-Mer,Commune,Collectivité territoriale,"Quai Saturnin Fabre, Centre Ville, Cavaillon, La Seyne-sur-Mer, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83500, France",83500.0,2022/06/27,1,0,1,4,0,0,0,5.881545,43.101511,COMMUNE_0000000009761869,La Seyne-sur-Mer,83126,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +182,493,Communauté de Communes Lacs et Gorges Du Verdon,Communauté de communes,Collectivité territoriale,"Place Martin Bidouré, Centre ville, Aups, Brignoles, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83630, France",83630.0,2022/06/22,0,0,0,0,0,0,0,6.223643,43.626847,COMMUNE_0000000009759634,Aups,83007,83,93,200040210,CC,CC Lacs et Gorges du Verdon,Var,Provence-Alpes-Côte d'Azur +183,492,Espace Pédagogie Formation France -EPFF,Autre,Association ou fédération,"21, Rue Roux de Brignoles, Palais de Justice, 6e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13006, France",13006.0,2022/06/21,0,0,0,0,0,0,0,5.373862,43.289547,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +184,491,Institut Universitaire Européen de la Mer (IUEM),Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,"Rue Dumont d'Urville, Technopôle Pointe du Diable, Technopôle Brest Iroise, Plouzané, Brest, Finistère, Bretagne, France métropolitaine, 29280, France",29280.0,2022/06/21,0,0,1,0,0,4,12,-4.563231,48.35798,COMMUNE_0000000009738585,Plouzané,29212,29,53,242900314,ME,Brest Métropole,Finistère,Bretagne +185,490,Office de Tourisme de Belledonne Chartreuse,Autre,Association ou fédération,"Prapoutel, Les Adrets, Grenoble, Isère, Auvergne-Rhône-Alpes, France 
métropolitaine, 38190, France",38190.0,2022/06/20,0,0,0,0,0,0,0,5.992302,45.257513,COMMUNE_0000000009753951,Les Adrets,38002,38,84,200018166,CC,CC Le Grésivaudan,Isère,Auvergne-Rhône-Alpes +186,489,Club des Entreprises des Vallées des Paillons - EVP,,Organisation socioprofessionnelle,"Route Départementale 15, Les Pastres, Contes, Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06390, France",6390.0,2022/06/16,1,0,1,0,1,0,0,7.187475,43.697062,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +187,486,SERVA ARTIGNOSC-SUR-VERDON,Association de protection de l'environnement,Association ou fédération,"Mairie, Chemin des Amandiers, Les Adrets, Artignosc-sur-Verdon, Brignoles, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83630, France",83630.0,2022/06/15,1,2,1,0,1,6,0,6.092412,43.705036,COMMUNE_0000000009759395,Artignosc-sur-Verdon,83005,83,93,200040210,CC,CC Lacs et Gorges du Verdon,Var,Provence-Alpes-Côte d'Azur +188,484,SEA Plastics,Association de protection de l'environnement,Association ou fédération,"Place de l'Agronomie, La Troche, Lozère, Palaiseau, Essonne, Île-de-France, France métropolitaine, 91120, France",91120.0,2022/06/10,0,0,0,0,0,0,0,2.211611,48.706485,COMMUNE_0000000009737489,Palaiseau,91477,91,11,200056232,CA,CA Communauté Paris-Saclay,Essonne,Île-de-France +189,482,ATSCAF Plongée 13,Club affilié FFESSM,Association ou fédération,"Pointe Rouge Port, Avenue d'Odessa, La Pointe-Rouge, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13008, France",13008.0,2022/06/03,1,0,0,0,0,0,0,5.368535,43.242724,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +190,481,Ville de Sanary-sur-Mer,Commune,Collectivité territoriale,"Place de la République, Sanary-sur-Mer, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83110, 
France",83110.0,2022/06/01,1,0,0,0,0,0,0,5.80209,43.117894,COMMUNE_0000000009761871,Sanary-sur-Mer,83123,83,93,248300394,CA,CA Sud Sainte Baume,Var,Provence-Alpes-Côte d'Azur +191,479,Syndicat National des Moniteurs de Ski,Autre,Association ou fédération,"Allée des Mitaillères, Meylan, Grenoble, Isère, Auvergne-Rhône-Alpes, Metropolitan France, 38240, France",38240.0,2022/05/25,1,0,1,0,4,0,0,5.767526,45.205418,COMMUNE_0000000009753959,Meylan,38229,38,84,200040715,ME,Grenoble-Alpes-Métropole,Isère,Auvergne-Rhône-Alpes +192,478,Grand Site Salagou - Cirque de Mourèze,,Services de l'état et établissements publics,"Cours de la Chicane, Clermont-l'Hérault, Lodève, Hérault, Occitanie, France métropolitaine, 34800, France",34800.0,2022/05/24,1,0,0,0,0,0,0,3.437265,43.62607,COMMUNE_0000000009759915,Clermont-l'Hérault,34079,34,76,243400355,CC,CC du Clermontais,Hérault,Occitanie +193,477,Méd In Nice,,Organisation socioprofessionnelle,"Boulevard des Jardiniers, Saint-Isidore, Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06284, France",6284.0,2022/05/24,1,0,0,0,0,0,0,7.193782,43.709277,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +194,475,Association Vie Val d?Is,Autre,Association ou fédération,"Maison de Val, Route de la Face, La Balme, Val-d'Isère, Albertville, Savoy, Auvergne-Rhône-Alpes, Metropolitan France, 73150, France",73150.0,2022/05/19,1,0,0,0,0,0,0,6.972949,45.451873,COMMUNE_0000000009752963,Val-d'Isère,73304,73,84,247300254,CC,CC de Haute-Tarentaise,Savoie,Auvergne-Rhône-Alpes +195,474,Collège Emilie de Mirabeau,Collège,Établissement scolaire ou d'enseignement supérieur,"Collège Emilie de Mirabeau, Avenue des Combattants en Afrique du Nord, Marignane, Istres, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13700, 
France",13700.0,2022/05/19,1,0,0,0,0,0,0,1.762897,48.464693,COMMUNE_0000000009739323,Auneau-Bleury-Saint-Symphorien,28015,28,24,200069953,CC,CC des Portes Euréliennes d'Île-de-France,Eure-et-Loir,Centre-Val de Loire +196,472,Mairie de Lans en Vercors,Commune,Collectivité territoriale,"Mairie, Place de la Mairie, Petit Geymond, Lans-en-Vercors, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38250, France",38250.0,2022/05/17,1,0,1,0,0,1,0,5.588882,45.128008,COMMUNE_0000000009754404,Lans-en-Vercors,38205,38,84,243801024,CC,CC du Massif du Vercors,Isère,Auvergne-Rhône-Alpes +197,471,Centre de Formation Professionnelle et de Promotion Agricoles (CFPPA),Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,"89, Traverse Parangon, 8e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13008, France",13008.0,2022/05/16,1,0,0,0,0,0,0,5.379556,43.2427,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +198,470,Notre Dame de France,Collège,Établissement scolaire ou d'enseignement supérieur,"Rue Théophile Decanis, Vauban, 6e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13006, France",13006.0,2022/05/16,1,1,1,0,1,1,0,5.376342,43.28375,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +199,469,Ocean academy,Association Education environnement et Développement durable,Association ou fédération,"Chemin du Bois Malatras, Chatte, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38160, France",38160.0,2022/05/12,1,0,1,1,0,0,0,5.292599,45.146374,COMMUNE_0000000009754415,Chatte,38095,38,84,200070431,CC,CC Saint-Marcellin Vercors Isère Communauté,Isère,Auvergne-Rhône-Alpes +200,468,Chateauneuf Les Martigues,Commune,Collectivité territoriale,"Place Bellot, 
Châteauneuf-les-Martigues, Istres, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13220, France",13220.0,2022/05/10,1,0,0,0,0,0,0,5.164041,43.382968,COMMUNE_0000000009760883,Châteauneuf-les-Martigues,13026,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +201,467,Association Gaïa,Association Education environnement et Développement durable,Association ou fédération,"imredd, Avenue Simone Veil, Caucade, Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06200, France",6200.0,2022/05/06,1,0,1,1,1,0,0,7.187475,43.697062,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +202,466,Mes Rivages Propres/Estérel blue Water,Association de protection de l'environnement,Association ou fédération,"Fréjus, Draguignan, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, France",83600.0,2022/05/05,1,0,1,1,0,0,0,6.383546,43.492415,COMMUNE_0000000009760334,Lorgues,83072,83,93,248300493,CA,CA Dracénie Provence Verdon Agglomération,Var,Provence-Alpes-Côte d'Azur +203,464,Saint François Longchamp Tourisme,,Organisation socioprofessionnelle,"Maison du Tourisme, Route du Col de la Madeleine, Longchamp, Saint-François-Longchamp, Saint-Jean-de-Maurienne, Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 73130, France",73130.0,2022/04/28,1,0,1,0,1,0,0,6.343796,45.405484,COMMUNE_0000000009753242,Saint François Longchamp,73235,73,84,247300361,CC,CC du Canton de La Chambre,Savoie,Auvergne-Rhône-Alpes +204,463,Cipec International School,École primaire,Établissement scolaire ou d'enseignement supérieur,"Route de Bouc-Bel-Air, Luynes, Aix-en-Provence, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, Metropolitan France, 13080, France",13080.0,2022/04/27,0,0,0,0,0,0,0,5.422322,43.478802,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur 
+205,461,Mayesha espoir diversité,Association Education environnement et Développement durable,Association ou fédération,"Rue de l'Hôtel de Ville, Le Teil, Privas, Ardèche, Auvergne-Rhône-Alpes, France métropolitaine, 07400, France",7400.0,2022/04/25,0,0,0,0,0,0,0,4.68353,44.550444,COMMUNE_0000000009756629,Le Teil,07319,07,84,200071405,CC,CC Ardèche Rhône Coiron,Ardèche,Auvergne-Rhône-Alpes +206,460,Graines de pensées - Ecole bilingue Montessori,École primaire,Établissement scolaire ou d'enseignement supérieur,"Grasse, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, France",6130.0,2022/04/21,1,0,0,0,0,0,0,7.177338,43.660783,COMMUNE_0000000009759377,Cagnes-sur-Mer,06027,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +207,459,Station de Chamrousse,,Services de l'état et établissements publics,"Place des Trolles, Roche Béranger, Chamrousse, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38410, France",38410.0,2022/04/20,1,2,1,1,3,0,0,5.874883,45.111403,COMMUNE_0000000009754387,Chamrousse,38567,38,84,200018166,CC,CC Le Grésivaudan,Isère,Auvergne-Rhône-Alpes +208,456,Commune de Saint-Estève Janson,Commune,Collectivité territoriale,"86, Boulevard des Écoles, Saint-Estève-Janson, Aix-en-Provence, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13610, France",13610.0,2022/04/14,1,0,0,0,0,0,0,5.465362,43.525817,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +209,455,Mairie Les Gets,Commune,Collectivité territoriale,"Mairie des Gets, 61, Route du Front de Neige, Le Rocher, Les Gets, Bonneville, Haute-Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 74260, France",74260.0,2022/04/13,1,0,0,0,0,0,0,6.669899,46.158827,COMMUNE_0000000009749924,Les Gets,74134,74,84,247400682,CC,CC du Haut-Chablais,Haute-Savoie,Auvergne-Rhône-Alpes +210,454,Office de Tourisme 
d'Arêches-Beaufort,,Services de l'état et établissements publics,"Route du Grand Mont, Arêches, Beaufort, Albertville, Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 73270, France",73270.0,2022/04/13,1,0,1,0,1,0,0,6.551231,45.726082,COMMUNE_0000000009751938,Beaufort,73034,73,84,200068997,CA,CA Arlysère,Savoie,Auvergne-Rhône-Alpes +211,453,Station Saint-Gervais Mont-Blanc,Commune,Collectivité territoriale,"Saint-Gervais-les-Bains, Bonneville, Haute-Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 74170, France",74170.0,2022/04/08,1,0,0,0,0,0,0,6.696649,45.89806,COMMUNE_0000000009751151,Saint-Gervais-les-Bains,74236,74,84,200034882,CC,CC Pays du Mont-Blanc,Haute-Savoie,Auvergne-Rhône-Alpes +212,452,Odyssée Méditerranée,Association de protection de l'environnement,Association ou fédération,"Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06000, France",6000.0,2022/04/08,1,0,0,0,0,0,0,7.189165,43.696307,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +213,451,Office de tourisme du corbier,Autre,Association ou fédération,"Le Corbier, Villarembert, Saint-Jean-de-Maurienne, Savoie, Auvergne-Rhône-Alpes, France métropolitaine, 73300, France",73300.0,2022/04/07,1,0,0,0,0,0,0,6.343395,45.269661,COMMUNE_0000000009753717,Saint-Jean-de-Maurienne,73248,73,84,200070464,CC,CC C?ur de Maurienne Arvan,Savoie,Auvergne-Rhône-Alpes +214,449,Région Occitanie,Région,Collectivité territoriale,"Conseil régional d'Occitanie, 22, Boulevard du Maréchal Juin, Saint-Michel, Le Busca, Empalot, Saint-Agne, Toulouse Sud-Est, Toulouse, Haute-Garonne, Occitanie, France métropolitaine, 31400, France",31400.0,2022/04/06,1,0,0,0,0,0,0,1.440857,43.5893,COMMUNE_0000000009760215,Toulouse,31555,31,76,243100518,ME,Toulouse Métropole,Haute-Garonne,Occitanie +215,447,Station Aillons-Margériaz,,Services de l'état et établissements publics,"Aillon-le-Jeune, Chambéry, Savoy, Auvergne-Rhône-Alpes, 
Metropolitan France, 73340, France",73340.0,2022/04/05,1,0,0,0,0,0,0,6.107441,45.615313,COMMUNE_0000000009752467,Aillon-le-Jeune,73004,73,84,200069110,CA,CA du Grand Chambéry,Savoie,Auvergne-Rhône-Alpes +216,446,Imarinair,Association de protection de l'environnement,Association ou fédération,"Tourrette-Levens, Nice, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06690, France",6690.0,2022/04/05,1,0,1,2,3,0,0,7.272858,43.792059,COMMUNE_0000000009758920,Tourrette-Levens,06147,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +217,445,Mairie de Combloux,,Services de l'état et établissements publics,"Route de la Mairie, Plommaz, Combloux, Bonneville, Upper Savoy, Auvergne-Rhône-Alpes, Metropolitan France, 74920, France",74920.0,2022/04/01,1,0,0,0,0,0,0,6.641225,45.896674,COMMUNE_0000000009750893,Combloux,74083,74,84,200034882,CC,CC Pays du Mont-Blanc,Haute-Savoie,Auvergne-Rhône-Alpes +218,444,SEMILOM - ORCIERES,Commune,Collectivité territoriale,"Rue des Écrins, Orcières-Merlette, Orcières, Gap, Hautes-Alpes, Provence-Alpes-Côte d'Azur, France métropolitaine, 05170, France",5170.0,2022/03/31,1,0,1,0,0,0,1,6.324129,44.696929,COMMUNE_0000000009755903,Orcières,05096,05,93,200068096,CC,CC Champsaur-Valgaudemar,Hautes-Alpes,Provence-Alpes-Côte d'Azur +219,443,"Syndicat Mixte de la Reppe, du Grand Vallat et de ses affluents (SMRGV)",,Services de l'état et établissements publics,"Sanary-sur-Mer, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83110, France",83110.0,2022/03/30,0,0,0,0,0,0,0,5.793395,43.131033,COMMUNE_0000000009761871,Sanary-sur-Mer,83123,83,93,248300394,CA,CA Sud Sainte Baume,Var,Provence-Alpes-Côte d'Azur +220,442,La Renverse,Association de protection de l'environnement,Association ou fédération,"Arradon, Vannes, Morbihan, Bretagne, France métropolitaine, 56610, France",56610.0,2022/03/29,1,1,1,0,6,1,0,-2.808836,47.624022,COMMUNE_0000000009743711,Arradon,56003,56,53,200067932,CA,CA 
Golfe du Morbihan - Vannes Agglomération,Morbihan,Bretagne +221,441,Agirrr,Association Education environnement et Développement durable,Association ou fédération,"11, Avenue de Val en Sol, Cagnes-sur-Mer, Grasse, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06800, France",6800.0,2022/03/29,1,0,0,0,0,0,0,7.145324,43.657049,COMMUNE_0000000009759377,Cagnes-sur-Mer,06027,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +222,440,Mairie de Megève,,Services de l'état et établissements publics,"Route du Jaillet, Megève, Bonneville, Upper Savoy, Auvergne-Rhône-Alpes, Metropolitan France, 74120, France",74120.0,2022/03/28,1,0,0,0,0,0,0,6.608636,45.861887,COMMUNE_0000000009751152,Megève,74173,74,84,200034882,CC,CC Pays du Mont-Blanc,Haute-Savoie,Auvergne-Rhône-Alpes +223,439,Collège Malraux,Collège,Établissement scolaire ou d'enseignement supérieur,"Avenue Gaspard Monge, La Farlède, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83210, France",83210.0,2022/03/28,1,0,1,0,1,0,0,6.040822,43.161128,COMMUNE_0000000009761638,La Farlède,83054,83,93,248300410,CC,CC de la Vallée du Gapeau,Var,Provence-Alpes-Côte d'Azur +224,437,Espace Grain de Sel - Noirmoutier en l'île,Commune,Collectivité territoriale,"Rue de la Poste, Le Bourg, Noirmoutier-en-l'Île, Les Sables-d'Olonne, Vendée, Pays de la Loire, France métropolitaine, 85330, France",85330.0,2022/03/25,1,0,1,0,2,0,0,-1.755561,46.479038,COMMUNE_0000002200276650,Les Sables-d'Olonne,85194,85,52,200071165,CA,CA Les Sables d'Olonne Agglomération,Vendée,Pays de la Loire +225,436,Collège La Vallée Verte,Collège,Établissement scolaire ou d'enseignement supérieur,"Avenue de la Condamine, Lotissement La Vallée Verte, Vauvert, Nîmes, Gard, Occitanie, France métropolitaine, 30600, France",30600.0,2022/03/22,1,0,1,0,1,0,0,4.272117,43.686818,COMMUNE_0000000009759889,Vauvert,30341,30,76,243000593,CC,CC de Petite Camargue,Gard,Occitanie +226,435,Institut 
Stanislas,Collège,Établissement scolaire ou d'enseignement supérieur,"Boulevard Pierre Delli Zotti, La Bello Visto, Saint-Raphaël, Draguignan, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83701, France",83701.0,2022/03/21,1,0,0,0,0,0,0,6.807601,43.43476,COMMUNE_0000000009760096,Saint-Raphaël,83118,83,93,200035319,CA,CA Estérel Côte d'Azur Agglomération,Var,Provence-Alpes-Côte d'Azur +227,434,Collège Lou Garlaban,Collège,Établissement scolaire ou d'enseignement supérieur,"Collège Lou Garlaban, Traverse Blanc, La Pérussone, Camp Major, Aubagne, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13400, France",13400.0,2022/03/19,1,0,0,0,0,0,0,5.541011,43.282392,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +228,433,Saint Pierre de Chartreuse,Commune,Collectivité territoriale,"Saint-Pierre-de-Chartreuse, Grenoble, Isère, Auvergne-Rhône-Alpes, Metropolitan France, 38380, France",38380.0,2022/03/18,1,0,1,1,0,0,0,5.82806,45.351865,COMMUNE_0000000009753485,Saint-Pierre-de-Chartreuse,38442,38,84,200040111,CC,CC C?ur de Chartreuse,Isère,Auvergne-Rhône-Alpes +229,430,Ecole élémentaire Lodi,École primaire,Établissement scolaire ou d'enseignement supérieur,"127, Rue de Lodi, Lodi, 6e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13006, France",13006.0,2022/03/15,1,0,0,0,0,0,0,5.389942,43.286867,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +230,428,Lycée Artaud,Lycée,Établissement scolaire ou d'enseignement supérieur,"Lycée Artaud, Chemin de Notre-Dame-de-Consolation, Saint-Mitre, 13e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13013, 
France",13013.0,2022/03/14,1,1,1,0,1,0,0,5.427971,43.340098,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +231,427,EEDF Marseille Huveaune,Autre,Association ou fédération,"82, Rue Sénac de Meilhan, La Plaine, 1er Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13001, France",13001.0,2022/03/14,1,0,1,2,0,0,0,5.384655,43.295636,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +232,426,La Vigie,Association de protection de l'environnement,Association ou fédération,"Rue de Kerhino, Kerhino, La Trinité-sur-Mer, Lorient, Morbihan, Bretagne, France métropolitaine, 56470, France",56470.0,2022/03/12,1,0,0,0,0,0,0,-3.028068,47.580473,COMMUNE_0000000009744048,La Trinité-sur-Mer,56258,56,53,200043123,CC,CC Auray Quiberon Terre Atlantique,Morbihan,Bretagne +233,425,Ville de Saint Mandrier sur mer,Commune,Collectivité territoriale,"Place des Résistants, Pin-Rolland, Saint-Mandrier-sur-Mer, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83430, France",83430.0,2022/03/11,1,0,1,1,2,1,0,5.929092,43.077496,COMMUNE_0000000009762080,Saint-Mandrier-sur-Mer,83153,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +234,418,Université de Toulon - Master sciences de la mer,Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,"Avenue de l'Université, La Valette-du-Var, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83160, France",83160.0,2022/03/03,1,0,0,0,0,0,0,6.006115,43.136872,COMMUNE_0000000009761636,La Valette-du-Var,83144,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +235,416,Collège André Chenier,Collège,Établissement scolaire ou d'enseignement supérieur,"Rue de l'Aiguillette, Saint-Barnabé, 12e Arrondissement, Marseille, 
Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13012, France",13012.0,2022/03/02,1,0,1,1,0,0,0,5.411981,43.304194,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +236,415,Lycée Professionnel La Calade,Lycée,Établissement scolaire ou d'enseignement supérieur,"Chemin de la Madrague Ville, La Calade, 15e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13015, France",13015.0,2022/03/02,1,0,1,1,0,0,0,5.358877,43.332747,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +237,414,Lycée Professionnel Gustave Eiffel,Lycée,Établissement scolaire ou d'enseignement supérieur,"Avenue Manouchian, Camp Major, Aubagne, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13400, France",13400.0,2022/03/01,1,0,1,1,0,0,0,5.548493,43.28588,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +238,412,Ecole des Hameaux,École primaire,Établissement scolaire ou d'enseignement supérieur,"La Bouilladisse, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13720, France",13720.0,2022/02/28,1,0,1,1,0,0,0,5.596715,43.40367,COMMUNE_0000000009760607,La Bouilladisse,13016,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +239,410,Collège Ubelka,Collège,Établissement scolaire ou d'enseignement supérieur,"Auriol, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13390, France",13390.0,2022/02/23,1,0,1,1,0,0,0,5.624874,43.351419,COMMUNE_0000000009760871,Auriol,13007,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +240,408,Les Insurgés des déchets,Association de protection de 
l'environnement,Association ou fédération,"Place Henri Barbusse, Le Chapus, Bourcefranc-le-Chapus, Rochefort, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17560, France",17560.0,2022/02/20,1,0,1,0,20,0,0,-1.148139,45.847625,COMMUNE_0000000009751684,Bourcefranc-le-Chapus,17058,17,75,241700699,CC,CC du Bassin de Marennes,Charente-Maritime,Nouvelle-Aquitaine +241,406,Collège Yves Klein,Collège,Établissement scolaire ou d'enseignement supérieur,"La Colle-sur-Loup, Grasse, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06480, France",6480.0,2022/02/17,1,0,0,0,0,0,0,7.073181,43.685895,COMMUNE_0000000009759147,La Colle-sur-Loup,06044,06,93,240600585,CA,CA de Sophia Antipolis,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +242,405,EPLEFPA Borgo-Marana,Lycée,Établissement scolaire ou d'enseignement supérieur,"Lycée professionel agricole de Borgo-Marana, 650, Route de Porettone, Borgo, Bastia, Haute-Corse, Corse, France métropolitaine, 20290, France",20290.0,2022/02/16,1,1,1,0,1,0,0,9.447237,42.585656,COMMUNE_0000000009762893,Borgo,2B042,2B,94,200036499,CC,CC de Marana-Golo,Haute-Corse,Corse +243,404,Association du Festival International de Jazz de Marseille des cinq continents,Autre,Association ou fédération,"Rue Beauvau, Opéra, 1er Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13001, France",13001.0,2022/02/15,0,0,0,0,0,0,0,5.375325,43.294586,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +244,402,Zéro Déchet Sud Sainte Baume,Association de protection de l'environnement,Association ou fédération,"Saint-Cyr-sur-Mer, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83270, France",83270.0,2022/02/11,0,0,0,0,0,0,0,5.714716,43.1558,COMMUNE_0000000009761642,Saint-Cyr-sur-Mer,83112,83,93,248300394,CA,CA Sud Sainte Baume,Var,Provence-Alpes-Côte d'Azur +245,401,Zorro Déchet Pays 
d'Aix,Association de protection de l'environnement,Association ou fédération,"Village du Soleil, Ferme des Truillas, Aix-en-Provence, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13540, France",13540.0,2022/02/09,1,0,1,1,1,1,0,5.43297,43.592478,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +246,399,Echo-Mer,Association de protection de l'environnement,Association ou fédération,"Quai Georges Simenon, Le Gabut, Tasdon, La Rochelle, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17000, France",17000.0,2022/02/08,1,0,1,1,5,5,0,-1.150672,46.155823,COMMUNE_0000000009750154,La Rochelle,17300,17,75,241700434,CA,CA de La Rochelle,Charente-Maritime,Nouvelle-Aquitaine +247,397,Association Ar Viltansoù,Association de protection de l'environnement,Association ou fédération,"Mairie, Rue Pasteur, Place Manigod, Le Conquet, Brest, Finistère, Bretagne, France métropolitaine, 29217, France",29217.0,2022/02/03,1,0,1,0,26,0,0,-4.770588,48.359377,COMMUNE_0000000009738160,Le Conquet,29040,29,53,242900074,CC,CC du Pays d'Iroise,Finistère,Bretagne +248,396,Unapei peypin,,Organisation socioprofessionnelle,"Peypin, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13124, France",13124.0,2022/02/01,1,0,0,0,0,0,0,5.591437,43.38572,COMMUNE_0000000009760872,Peypin,13073,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +249,395,GLOBE & COLOS,,Organisation socioprofessionnelle,"11, Rue Louis Plana, Jolimont - Soupetard - Bonhoure, Toulouse Est, Toulouse, Haute-Garonne, Occitanie, France métropolitaine, 31500, France",31500.0,2022/01/31,1,0,1,0,1,0,0,1.481174,43.60774,COMMUNE_0000000009760215,Toulouse,31555,31,76,243100518,ME,Toulouse Métropole,Haute-Garonne,Occitanie +250,390,Théâtre La Criée,,Services de l'état et établissements publics,"Quai de Rive-Neuve, Saint-Victor, 7e 
Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13007, France",13007.0,2022/01/11,0,0,0,0,0,0,0,5.368565,43.292701,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +251,389,Le Lagon,Association sportive,Association ou fédération,"Rue Benjamin Delessert, Gerland, Lyon 7e Arrondissement, Lyon, Métropole de Lyon, Circonscription départementale du Rhône, Auvergne-Rhône-Alpes, France métropolitaine, 69007, France",69007.0,2022/01/08,1,0,0,0,0,0,0,4.833126,45.728414,COMMUNE_0000000009752008,Lyon,69123,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +252,388,Groupe Naturaliste de l'Université de Montpellier,Association Education environnement et Développement durable,Association ou fédération,"Université de Montpellier, Place Eugène Bataillon, Hôpitaux-Facultés, Montpellier, Hérault, Occitanie, France métropolitaine, 34095, France",34095.0,2022/01/07,0,0,0,0,0,0,0,3.875773,43.613132,COMMUNE_0000000009759901,Montpellier,34172,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +253,386,Réserve Naturelle Nationale de la Belle Henriette,Association de protection de l'environnement,Association ou fédération,"Rue du 8 Mai, Saint-Denis-du-Payré, Fontenay-le-Comte, Vendée, Pays de la Loire, France métropolitaine, 85580, France",85580.0,2021/12/27,1,0,1,0,1,0,0,-1.267531,46.406637,COMMUNE_0000000009749259,Saint-Denis-du-Payré,85207,85,52,200073260,CC,CC Sud Vendée Littoral,Vendée,Pays de la Loire +254,383,Parc naturel marin du golfe du lion,,Services de l'état et établissements publics,"Impasse Charlemagne, Le Jardin aux Fontaines, Argelès-Plage, Argelès-sur-Mer, Céret, Pyrénées-Orientales, Occitanie, France métropolitaine, 66700, France",66700.0,2021/11/26,0,0,1,0,1,0,0,2.751957,42.487741,COMMUNE_0000000009763423,Céret,66049,66,76,246600373,CC,CC du Vallespir,Pyrénées-Orientales,Occitanie +255,382,Run 
Eco Team,Association sportive,Association ou fédération,"Boulevard de la Liberté, Jean-Macé, Jean-Macé - Chantenay, Bellevue - Chantenay - Sainte-Anne, Nantes, Loire-Atlantique, Pays de la Loire, France métropolitaine, 44100, France",44100.0,2021/11/24,1,0,0,0,0,0,0,-1.58859,47.202592,COMMUNE_0000000009746105,Nantes,44109,44,52,244400404,ME,Nantes Métropole,Loire-Atlantique,Pays de la Loire +256,381,Préserv'Action Terre et Littoral,Association de protection de l'environnement,Association ou fédération,"Moëlan-sur-Mer, Finistère, Bretagne, France métropolitaine, France",29380.0,2021/11/23,1,0,0,0,0,0,0,7.177338,43.660783,COMMUNE_0000000009759377,Cagnes-sur-Mer,06027,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +257,379,Département de l'Hérault,Département,Collectivité territoriale,"Conseil départemental, 1977, Avenue des Moulins, La Paillade, Mosson, Montpellier, Hérault, Occitanie, France métropolitaine, 34087, France",34087.0,2021/11/22,1,0,0,0,0,0,0,3.818583,43.614809,COMMUNE_0000000009759901,Montpellier,34172,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +258,376,Ecole Bonneveine 1,École primaire,Établissement scolaire ou d'enseignement supérieur,"52, Boulevard du Sablier, Bonneveine, 8e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13008, France",13008.0,2021/11/17,0,1,1,1,0,0,0,5.382131,43.251698,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +259,374,Communauté de commune du Grand Reims,Communauté de communes,Collectivité territoriale,"Rue Arthur Décès, Reims, Marne, Grand Est, France métropolitaine, 51100, France",51100.0,2021/11/17,1,0,0,0,0,0,0,4.057758,49.258325,COMMUNE_0000000009732933,Reims,51454,51,44,200067213,CU,CU du Grand Reims,Marne,Grand Est +260,371,Les balades de charlotte,,Organisation socioprofessionnelle,Impasse de la Grande Cabane 
83160 La Valette-du-Var,83160.0,2021/11/05,1,0,1,0,1,0,0,5.979727,43.150556,COMMUNE_0000000009761636,La Valette-du-Var,83144,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +261,370,CPIE Vallée de l'Orne,Association de protection de l'environnement,Association ou fédération,Boulevard Maritime 14121 Sallenelles,14121.0,2021/11/05,1,0,0,0,0,0,0,-0.232056,49.264462,COMMUNE_0000000009732693,Sallenelles,14665,14,28,200065563,CC,CC Normandie-Cabourg-Pays d'Auge,Calvados,Normandie +262,369,Le Naturoscope - Pôle Var,Association Education environnement et Développement durable,Association ou fédération,Avenue de Montredon 13008 Marseille,13008.0,2021/11/04,1,0,1,0,3,0,0,5.363734,43.237559,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +263,367,ASPTT LA LONDE,Association sportive,Association ou fédération,Bd de la Plage de l?Argentiere 83250 La Londe-les-Maures,83250.0,2021/10/27,1,0,1,0,1,0,0,6.255236,43.122399,COMMUNE_0000000009761633,La Londe-les-Maures,83071,83,93,200027100,CC,CC Méditerranée Porte des Maures,Var,Provence-Alpes-Côte d'Azur +264,366,L'Amarre,Association Education environnement et Développement durable,Association ou fédération,5 Quai des Pecheurs 83000 Toulon,83081.0,2021/10/27,1,0,1,0,2,0,0,5.939888,43.108922,COMMUNE_0000000009761867,Toulon,83137,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +265,365,GEPRONA,Autre,Association ou fédération,Chemin Saint Lazare 83400 Hyères,83400.0,2021/10/26,1,0,1,0,3,0,0,6.167013,43.117488,COMMUNE_0000000009761863,Hyères,83069,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +266,364,Les randonneurs craurois,Association sportive,Association ou fédération,Avenue Fréderic Mistral 83260 La Crau,83260.0,2021/10/25,1,0,1,0,2,0,0,6.082724,43.145655,COMMUNE_0000000009761635,La Crau,83047,83,93,248300543,ME,Métropole 
Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +267,363,Chavagne,Commune,Collectivité territoriale,Avenue de la Mairie 35310 Chavagne,35310.0,2021/10/25,1,0,1,2,0,0,0,-1.788396,48.052572,COMMUNE_0000000009741744,Chavagne,35076,35,53,243500139,ME,Rennes Métropole,Ille-et-Vilaine,Bretagne +268,362,Estérel Côte d'azur Agglomération,Communauté de communes,Collectivité territoriale,624 Chemin Aurélien 83700 Saint-Raphaël,83700.0,2021/10/22,1,0,0,0,0,0,0,6.768828,43.437279,COMMUNE_0000000009760096,Saint-Raphaël,83118,83,93,200035319,CA,CA Estérel Côte d'Azur Agglomération,Var,Provence-Alpes-Côte d'Azur +269,360,SuperFlip,,Eco-artiste,Espagne 47300 Villeneuve-sur-Lot,47300.0,2021/10/17,1,0,0,0,0,0,0,0.734226,44.428259,COMMUNE_0000000009757342,Villeneuve-sur-Lot,47323,47,75,200023307,CA,CA du Grand Villeneuvois,Lot-et-Garonne,Nouvelle-Aquitaine +270,359,13 Envie de Sport,Autre,Association ou fédération,17 Rue Rolland 13010 Marseille,13010.0,2021/10/14,0,1,1,3,0,0,0,5.398032,43.282154,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +271,358,Coup de balai dans l'eau,Association de protection de l'environnement,Association ou fédération,Quai Antoine Riboud 69002 Lyon,69002.0,2021/10/11,1,0,1,0,0,0,1,4.817059,45.742761,COMMUNE_0000000009752008,Lyon,69123,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +272,357,Odyssee.green,Association Education environnement et Développement durable,Association ou fédération,50 Impasse de la Glaciere 42190 Chandon,42190.0,2021/10/11,1,0,0,0,0,0,0,4.175535,46.154474,COMMUNE_0000000009750267,Chandon,42048,42,84,200035202,CC,CC Charlieu-Belmont,Loire,Auvergne-Rhône-Alpes +273,354,Communauté de communes de l'Ile d'Oléron,Communauté de communes,Collectivité territoriale,59 Route des Allées 17310 
Saint-Pierre-d'Oléron,17310.0,2021/10/06,1,0,0,0,0,0,0,-1.303525,45.936181,COMMUNE_0000000009751150,Saint-Pierre-d'Oléron,17385,17,75,241700624,CC,CC de l'Île d'Oléron,Charente-Maritime,Nouvelle-Aquitaine +274,353,Mairie Tourrette-Levens,Commune,Collectivité territoriale,Place du Docteur Paul Simon 06690 Tourrette-Levens,6690.0,2021/10/03,1,0,1,1,0,0,0,7.275913,43.786905,COMMUNE_0000000009758920,Tourrette-Levens,06147,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +275,352,Moana by reef protect,Association de protection de l'environnement,Association ou fédération,7 Avenue de la Tartane 83400 Hyères,83400.0,2021/09/30,1,0,1,6,0,0,0,6.174057,43.102192,COMMUNE_0000000009761863,Hyères,83069,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +276,351,BioDiversTissons,Autre,Association ou fédération,43 Rue des Cypres 29190 Pleyben,29190.0,2021/09/29,0,0,1,2,0,0,3,-3.952737,48.228761,COMMUNE_0000000009739905,Pleyben,29162,29,53,200067247,CC,CC Pleyben-Châteaulin-Porzay,Finistère,Bretagne +277,350,CPIE Bastia - U marinu,Association Education environnement et Développement durable,Association ou fédération,Montesoro Provence Logis 20600 Bastia,20600.0,2021/09/27,1,2,1,2,1,2,2,9.435923,42.673935,COMMUNE_0000000009762642,Bastia,2B033,2B,94,242000354,CA,CA de Bastia,Haute-Corse,Corse +278,349,BILBOK,Association Education environnement et Développement durable,Association ou fédération,22 Place Gambetta 83143 Le Val,83143.0,2021/09/23,0,0,1,0,1,0,0,6.073905,43.439493,COMMUNE_0000000009760340,Le Val,83143,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +279,348,Planète Actions,Association de protection de l'environnement,Association ou fédération,Chemin du Counillier 83600 Fréjus,83600.0,2021/09/15,1,0,1,5,109,3,0,6.745395,43.441776,COMMUNE_0000000009760330,Fréjus,83061,83,93,200035319,CA,CA Estérel Côte d'Azur Agglomération,Var,Provence-Alpes-Côte d'Azur 
+280,347,Castera-Verduzan,Commune,Collectivité territoriale,Place de l?Ancien Foirail 32410 Castéra-Verduzan,32410.0,2021/09/10,1,0,1,0,1,0,0,0.427916,43.80545,COMMUNE_0000000009759316,Castéra-Verduzan,32083,32,76,200066926,CA,CA Grand Auch C?ur de Gascogne,Gers,Occitanie +281,346,Collège Montjoie,Collège,Établissement scolaire ou d'enseignement supérieur,Rue Maurice Claret 45770 Saran,45770.0,2021/09/09,1,0,0,0,0,0,0,1.886689,47.950553,COMMUNE_0000000009742588,Saran,45302,45,24,244500468,ME,Orléans Métropole,Loiret,Centre-Val de Loire +282,345,PROVENCE DURABLE,,Organisation socioprofessionnelle,2 Avenue des Belges 13100 Aix-en-Provence,13100.0,2021/09/07,1,0,1,1,0,0,0,5.444745,43.525307,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +283,344,CPIE des Iles de Lérins et Pays d'Azur,Association de protection de l'environnement,Association ou fédération,5 Rue de Mimont 06400 Cannes,6400.0,2021/09/06,0,0,0,0,0,0,0,7.020172,43.55482,COMMUNE_0000000009759860,Cannes,06029,06,93,200039915,CA,CA Cannes Pays de Lérins,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +284,343,Lycée de la Mer - Sète,Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,Rue des Cormorans 34200 Sète,34200.0,2021/09/06,0,0,1,0,1,0,0,3.672964,43.419407,COMMUNE_0000000009760885,Sète,34301,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +285,341,PikPik Environnement,Association Education environnement et Développement durable,Association ou fédération,4 Rue de l?Abbe Gregoire 92130 Issy-les-Moulineaux,92130.0,2021/08/31,0,0,0,0,0,0,0,2.275753,48.822558,COMMUNE_0000000009736549,Issy-les-Moulineaux,92040,92,11,200054781,ME,Métropole du Grand Paris,Hauts-de-Seine,Île-de-France +286,338,EAU DE PARIS,,Services de l'état et établissements publics,19 Rue Neuve Tolbiac 75013 
Paris,75013.0,2021/08/23,1,0,0,0,0,0,0,2.378111,48.831098,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +287,337,conseil départemental de la Manche,Département,Collectivité territoriale,98 Route de Candol 50000 Saint-Lô,50000.0,2021/08/19,1,0,0,0,0,0,0,-1.117092,49.100652,COMMUNE_0000000009734197,Saint-Lô,50502,50,28,200066389,CA,CA Saint-Lô Agglo,Manche,Normandie +288,334,Mairie de Frontignan,,Services de l'état et établissements publics,Place Hotel de Ville 34110 Frontignan,34110.0,2021/08/02,1,0,1,0,1,0,0,3.756228,43.445697,COMMUNE_0000000009760628,Frontignan,34108,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +289,333,Girls Inspired For Tomorrow (GIFT),Association de protection de l'environnement,Association ou fédération,58 Rue de l?Hysope 13300 Salon-de-Provence,13300.0,2021/07/31,1,0,1,4,0,0,0,5.118434,43.634654,COMMUNE_0000000009759648,Salon-de-Provence,13103,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +290,332,Ville d'Agde,Commune,Collectivité territoriale,Rue Alsace Lorraine 34300 Agde,34300.0,2021/07/30,1,0,1,0,1,0,0,3.479576,43.314635,COMMUNE_0000000009761167,Agde,34003,34,76,243400819,CA,CA Hérault-Méditerranée,Hérault,Occitanie +291,330,Blog Géronimo22,Autre,Association ou fédération,6 Rue Jean Baptiste Lully 22300 Lannion,22300.0,2021/07/27,1,0,0,0,0,0,0,-3.455002,48.746923,COMMUNE_0000000009736261,Lannion,22113,22,53,200065928,CA,CA Lannion-Trégor Communauté,Côtes-d'Armor,Bretagne +292,328,Festival de Marseille,Autre,Association ou fédération,Rue de la Republique 13002 Marseille,13001.0,2021/07/23,1,0,1,0,1,0,0,5.370541,43.300484,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +293,326,ARVIK Ocean,Association de protection de l'environnement,Association ou fédération,44 Rue de Kerquer 56000 
Vannes,56000.0,2021/07/20,1,0,0,0,0,0,0,-2.7596,47.668564,COMMUNE_0000000009743709,Vannes,56260,56,53,200067932,CA,CA Golfe du Morbihan - Vannes Agglomération,Morbihan,Bretagne +294,323,A Votre Bonheur,,Organisation socioprofessionnelle,27 Rue Abel Hovelacque 75013 Paris,75013.0,2021/07/14,0,0,0,0,0,0,0,2.352818,48.831409,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +295,322,Pacha kleaner,Association de protection de l'environnement,Association ou fédération,Rte des Vallees 74100 Annemasse,74100.0,2021/07/11,1,0,0,0,0,0,0,6.245698,46.192654,COMMUNE_0000000009749941,Annemasse,74012,74,84,200011773,CA,CA Annemasse-Les Voirons-Agglomération,Haute-Savoie,Auvergne-Rhône-Alpes +296,321,Océan Protection France,Association de protection de l'environnement,Association ou fédération,Boulevard Frédéric Fabreges 34250 Palavas-les-Flots,34250.0,2021/07/11,1,0,1,9,3,0,0,3.926165,43.528063,COMMUNE_0000000009760354,Palavas-les-Flots,34192,34,76,243400470,CA,CA du Pays de l'Or,Hérault,Occitanie +297,320,Le Plastique C'est Dramatique,Association de protection de l'environnement,Association ou fédération,12 Chemin de Las Hyéros 11360 Villesèque-des-Corbières,11360.0,2021/07/09,1,0,0,0,0,0,0,2.848955,43.012843,COMMUNE_0000000009762491,Villesèque-des-Corbières,11436,11,76,200070365,CC,CC Corbières Salanque Méditerranée,Aude,Occitanie +298,319,Aquacaux,Autre,Association ou fédération,Chemin de Saint-Andrieux 76930 Octeville-sur-Mer,76930.0,2021/07/09,1,0,0,0,0,0,0,0.095042,49.543562,COMMUNE_0000000009730807,Octeville-sur-Mer,76481,76,28,200084952,CU,CU Le Havre Seine Métropole,Seine-Maritime,Normandie +299,318,AS ROTO SPORTS Plongée,Association sportive,Association ou fédération,9 Boulevard de l?Industrie 41000 Blois,41000.0,2021/07/08,1,0,0,0,0,0,0,1.322419,47.60227,COMMUNE_0000000009744591,Blois,41018,41,24,200030385,CA,"CA de Blois ""Agglopolys",Loir-et-Cher,Centre-Val de Loire +300,317,Valras-Plage,Commune,Collectivité 
territoriale,11 Allee General Charles de Gaulle 34350 Valras-Plage,34350.0,2021/07/07,1,0,1,0,1,0,0,3.293895,43.24676,COMMUNE_0000000009761645,Valras-Plage,34324,34,76,243400769,CA,CA de Béziers-Méditerranée,Hérault,Occitanie +301,315,Côte Waste,Autre,Association ou fédération,Rue Jeanne de Beauregard 29780 Plouhinec,29780.0,2021/07/07,0,0,0,0,0,0,0,-4.530039,48.021737,COMMUNE_0000000009741086,Plouhinec,29197,29,53,242900629,CC,CC Cap Sizun - Pointe du Raz,Finistère,Bretagne +302,313,MAIRIE DE LE GRAU DU ROI,Commune,Collectivité territoriale,1 Place de la Libération 30240 Le Grau-du-Roi,30240.0,2021/07/01,1,0,1,0,1,0,0,4.13822,43.533628,COMMUNE_0000000009760353,Le Grau-du-Roi,30133,30,76,243000650,CC,CC Terre de Camargue,Gard,Occitanie +303,311,Argelès-sur-Mer,Commune,Collectivité territoriale,Avenue de la Libération 66700 Argelès-sur-Mer,66700.0,2021/06/30,1,0,1,0,2,0,0,3.027283,42.547386,COMMUNE_0000000009763364,Argelès-sur-Mer,66008,66,76,200043602,CC,"CC des Albères, de la Côte Vermeille et de l'Illibéris",Pyrénées-Orientales,Occitanie +304,310,VIAS,Commune,Collectivité territoriale,6 Place des Arenes 34450 Vias,34450.0,2021/06/30,1,0,1,0,1,0,0,3.418291,43.312998,COMMUNE_0000000009761409,Vias,34332,34,76,243400819,CA,CA Hérault-Méditerranée,Hérault,Occitanie +305,309,LAGUEPIE,Commune,Collectivité territoriale,Route de Varen 82250 Laguépie,82250.0,2021/06/29,1,0,1,0,1,0,0,1.96054,44.148001,COMMUNE_0000000009758000,Laguépie,82088,82,76,248200107,CC,CC du Quercy Rouergue et des Gorges de l'Aveyron,Tarn-et-Garonne,Occitanie +306,307,Les z'héros déchet,Association de protection de l'environnement,Association ou fédération,9bis Rue Jean Jaures 65600 Séméac,65600.0,2021/06/28,1,0,0,0,0,0,0,0.106927,43.22598,COMMUNE_0000000009761783,Séméac,65417,65,76,200069300,CA,CA Tarbes-Lourdes-Pyrénées,Hautes-Pyrénées,Occitanie +307,306,Ville de Narbonne,Commune,Collectivité territoriale,Quai Dillon 11100 
Narbonne,11100.0,2021/06/25,1,0,1,0,1,0,0,3.002034,43.183787,COMMUNE_0000000009761873,Narbonne,11262,11,76,241100593,CA,CA Le Grand Narbonne,Aude,Occitanie +308,305,Environnement 93,Association de protection de l'environnement,Association ou fédération,11 Allée des Sources 93220 Gagny,93220.0,2021/06/24,0,0,0,0,0,0,0,2.541067,48.886644,COMMUNE_0000000009736014,Gagny,93032,93,11,200054781,ME,Métropole du Grand Paris,Seine-Saint-Denis,Île-de-France +309,304,FRENE,Association Education environnement et Développement durable,Association ou fédération,164 Rue des Albatros 34000 Montpellier,34000.0,2021/06/24,0,0,0,0,0,0,0,3.900646,43.607414,COMMUNE_0000000009759901,Montpellier,34172,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +310,303,Commune de Saint-Cyprien,Commune,Collectivité territoriale,39 Avenue du Roussillon 66750 Saint-Cyprien,66750.0,2021/06/24,0,0,1,0,1,0,0,3.002262,42.618001,COMMUNE_0000000009763210,Saint-Cyprien,66171,66,76,246600282,CC,CC Sud-Roussillon,Pyrénées-Orientales,Occitanie +311,159,Eurasia Net,Autre,Association ou fédération,67 Voie la Canebiere 13001 Marseille,13001.0,2021/06/22,0,1,1,1,0,0,0,5.379563,43.297208,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +312,301,HOW LUCKY WE ARE,Association Education environnement et Développement durable,Association ou fédération,9 Rue Thiberville 94250 Gentilly,94250.0,2021/06/18,1,0,0,0,0,0,0,2.351664,48.817534,COMMUNE_0000000009736534,Gentilly,94037,94,11,200054781,ME,Métropole du Grand Paris,Val-de-Marne,Île-de-France +313,79,la tortue qui secoue le monde,Association de protection de l'environnement,Association ou fédération,1256 Chemin de Berre 13760 Saint-Cannat,13760.0,2021/06/17,1,0,1,1,0,0,0,5.290507,43.610358,COMMUNE_0000000009759883,Saint-Cannat,13091,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +314,269,CIQ de La Parette 
Auriol,Comité d'interêt de quartier,Association ou fédération,977 Chemin de la Parette 13390 Auriol,13390.0,2021/06/17,1,0,0,0,0,0,0,5.644242,43.351779,COMMUNE_0000000009760871,Auriol,13007,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +315,12,Mairie de Marseillan,Commune,Collectivité territoriale,1 Rue General de Gaulle 34340 Marseillan,34340.0,2021/06/11,1,0,1,0,1,0,0,3.527895,43.356008,COMMUNE_0000000009761165,Marseillan,34150,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +316,212,Association de Sauvegarde du Littoral des Orpeillères et du Biterrois,Association de protection de l'environnement,Association ou fédération,1 Rue Kleber 34410 Sérignan,34410.0,2021/06/10,1,0,0,0,0,0,0,3.280893,43.281894,COMMUNE_0000000009761411,Sérignan,34299,34,76,243400769,CA,CA de Béziers-Méditerranée,Hérault,Occitanie +317,250,CPIE Loire Océane,Association de protection de l'environnement,Association ou fédération,Rue Aristide Briand 44350 Guérande,44350.0,2021/06/10,0,0,1,1,0,0,0,-2.430611,47.3308,COMMUNE_0000000009745535,Guérande,44069,44,52,244400610,CA,CA de la Presqu'île de Guérande Atlantique (Cap Atlantique),Loire-Atlantique,Pays de la Loire +318,81,SOS laisse de mer,Association Education environnement et Développement durable,Association ou fédération,138 Rue du Maréchal Foch 80410 Cayeux-sur-Mer,80410.0,2021/06/10,1,0,1,1,1,0,0,1.493258,50.179167,COMMUNE_0000000009728427,Cayeux-sur-Mer,80182,80,32,200070993,CA,CA de la Baie de Somme,Somme,Hauts-de-France +319,104,Agence ComCi ComCa,,Organisation socioprofessionnelle,Avenue Sidi Brahim 06130 Grasse,6130.0,2021/06/08,1,0,0,0,0,0,0,6.930429,43.649812,COMMUNE_0000000009759383,Grasse,06069,06,93,200039857,CA,CA du Pays de Grasse,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +320,177,Eau Pop Pop,,Organisation socioprofessionnelle,Rue du Breganconnet 83250 La Londe-les-Maures,83250.0,2021/06/06,1,0,0,0,0,0,0,6.243039,43.123186,COMMUNE_0000000009761633,La 
Londe-les-Maures,83071,83,93,200027100,CC,CC Méditerranée Porte des Maures,Var,Provence-Alpes-Côte d'Azur +321,274,SoS Pla'nette,Association de protection de l'environnement,Association ou fédération,"Saverne, Bas-Rhin, Grand Est, France métropolitaine, 67700, France",67700.0,2021/06/03,1,1,1,3,35,0,0,7.363024,48.741027,COMMUNE_0000000009736308,Saverne,67437,67,44,200068112,CC,CC du Pays de Saverne,Bas-Rhin,Grand Est +322,160,Plombkemon Upcycle,Association de protection de l'environnement,Association ou fédération,15 Rue Paul Lazari 64200 Biarritz,64200.0,2021/06/03,1,0,0,0,0,0,0,-1.566984,43.46974,COMMUNE_0000000009760593,Biarritz,64122,64,75,200067106,CA,CA du Pays Basque,Pyrénées-Atlantiques,Nouvelle-Aquitaine +323,65,L'Ecorrigans de l'Elorn,Autre,Association ou fédération,Vern Ar Piquet 29460 Daoulas,29460.0,2021/06/02,1,0,0,0,0,0,0,-4.256572,48.365516,COMMUNE_0000000009739037,Daoulas,29043,29,53,242900801,CA,CA du Pays de Landerneau-Daoulas,Finistère,Bretagne +324,267,Office de Tourisme C?ur Sud-Ouest,Autre,Association ou fédération,21 Place de l?Hôtel de Ville 32230 Marciac,32230.0,2021/06/01,0,0,1,0,1,0,0,0.161059,43.524367,COMMUNE_0000000009760503,Marciac,32233,32,76,243200508,CC,CC Bastides et Vallons du Gers,Gers,Occitanie +325,278,Green Sailing Generation,Association Education environnement et Développement durable,Association ou fédération,"27, Rue François Rocca, Saint-Giniez, 8e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13008, France",13008.0,2021/05/31,1,2,1,1,11,0,0,5.38524,43.269415,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +326,123,CAF MP,Association sportive,Association ou fédération,14 Quai de Rive Neuve 13007 Marseille,13007.0,2021/05/28,1,0,1,0,2,0,0,5.370824,43.293043,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole 
d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +327,54,CoLLecT-IF environnement,Association de protection de l'environnement,Association ou fédération,Place Evariste Gras 13600 La Ciotat,13600.0,2021/05/25,1,1,1,1,1,0,0,5.604384,43.175517,COMMUNE_0000000009761644,La Ciotat,13028,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +328,58,CERBERE A JAMAIS / CERBERE GARDIENS MER ET NATURE,Association de protection de l'environnement,Association ou fédération,Rue Alexandre Ducros 66290 Cerbère,66290.0,2021/05/19,1,6,1,8,60,1,1,3.166431,42.442734,COMMUNE_0000000009763416,Cerbère,66048,66,76,200043602,CC,"CC des Albères, de la Côte Vermeille et de l'Illibéris",Pyrénées-Orientales,Occitanie +329,162,Mairie de Palavas-les-Flots,Commune,Collectivité territoriale,16 Boulevard Marechal Joffre 34250 Palavas-les-Flots,34250.0,2021/05/12,0,0,0,0,0,0,0,3.932927,43.528305,COMMUNE_0000000009760354,Palavas-les-Flots,34192,34,76,243400470,CA,CA du Pays de l'Or,Hérault,Occitanie +330,34,MAIRIE MONTREJEAU,Commune,Collectivité territoriale,Avenue des Toureilles 31210 Montréjeau,31210.0,2021/05/12,1,0,1,0,1,0,0,0.561341,43.093481,COMMUNE_0000000009762196,Montréjeau,31390,31,76,200072643,CC,CC C?ur et Coteaux du Comminges,Haute-Garonne,Occitanie +331,107,Aire Marine Educative de Port Vendre,École primaire,Établissement scolaire ou d'enseignement supérieur,Rue Violet 66660 Port-Vendres,66660.0,2021/05/11,0,0,0,0,0,0,0,3.104582,42.517755,COMMUNE_0000000009763362,Port-Vendres,66148,66,76,200043602,CC,"CC des Albères, de la Côte Vermeille et de l'Illibéris",Pyrénées-Orientales,Occitanie +332,13,Unis-Terre,Association Education environnement et Développement durable,Association ou fédération,Avenue de Luminy 13009 Marseille,13009.0,2021/05/10,1,0,0,0,0,0,0,5.435843,43.232668,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur 
+333,198,Faisons des MERveilles -Région Occitanie,Région,Collectivité territoriale,201 Avenue de la Pompignane 34000 Montpellier,34000.0,2021/05/07,1,0,0,0,0,0,0,3.898276,43.606665,COMMUNE_0000000009759901,Montpellier,34172,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +334,87,Office de Tourisme Cahors - Vallée du Lot,Autre,Association ou fédération,Place Francois Mitterrand 46000 Cahors,46000.0,2021/05/07,1,0,1,1,0,0,0,1.440908,44.445773,COMMUNE_0000000009757321,Cahors,46042,46,76,200023737,CA,CA du Grand Cahors,Lot,Occitanie +335,223,Mauguio Carnon,Commune,Collectivité territoriale,Carnon-Plage 34280 Mauguio,34280.0,2021/05/07,1,0,1,0,1,0,0,3.993161,43.548923,COMMUNE_0000000009760129,Mauguio,34154,34,76,243400470,CA,CA du Pays de l'Or,Hérault,Occitanie +336,277,Le Barcares,Commune,Collectivité territoriale,Boulevard du 14 Juillet 66420 Le Barcarès,66420.0,2021/05/07,0,0,1,0,1,0,0,3.035154,42.788508,COMMUNE_0000000009762911,Le Barcarès,66017,66,76,200027183,CU,CU Perpignan Méditerranée Métropole,Pyrénées-Orientales,Occitanie +337,201,Base de Loisirs de MOLIERES,Commune,Collectivité territoriale,Rue de la Mairie 82220 Molières,82220.0,2021/05/07,1,0,1,0,2,0,0,1.363757,44.193803,COMMUNE_0000000009758014,Molières,82113,82,76,248200057,CC,CC du Quercy Caussadais,Tarn-et-Garonne,Occitanie +338,191,Project Rescue Océan Antenne Marseille,Association de protection de l'environnement,Association ou fédération,"Rue Edmond Rostand, Castellane, 6e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13006, France",13006.0,2021/05/06,1,1,1,1,0,0,0,5.381396,43.285757,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +339,249,Ville de Saint-Raphaël,Commune,Collectivité territoriale,26 Place Sadi Carnot 83700 
Saint-Raphaël,83700.0,2021/05/05,1,4,1,5,8,0,0,6.768535,43.425097,COMMUNE_0000000009760096,Saint-Raphaël,83118,83,93,200035319,CA,CA Estérel Côte d'Azur Agglomération,Var,Provence-Alpes-Côte d'Azur +340,210,Espace jeune municipal Septémois,Commune,Collectivité territoriale,50 Avenue du 8 Mai 1945 13240 Septèmes-les-Vallons,13240.0,2021/05/05,1,0,0,0,0,0,0,5.366515,43.398503,COMMUNE_0000000009760615,Septèmes-les-Vallons,13106,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +341,169,Biiom,,Eco-artiste,Chemin du Moulin 84190 Vacqueyras,84190.0,2021/05/04,1,0,0,0,0,0,0,4.976174,44.138047,COMMUNE_0000000009757938,Vacqueyras,84136,84,93,248400053,CA,CA Ventoux-Comtat-Venaissin (COVE),Vaucluse,Provence-Alpes-Côte d'Azur +342,40,SANDKEEPERS,Association de protection de l'environnement,Association ou fédération,Rue Guy de Maupassant 59000 Lille,59000.0,2021/05/01,1,0,0,0,0,0,0,3.075321,50.617188,COMMUNE_0000000009727244,Lille,59350,59,32,200093201,ME,Métropole Européenne de Lille,Nord,Hauts-de-France +343,56,Sauvage Méditerranée,Association de protection de l'environnement,Association ou fédération,Route d?Avignon 13100 Aix-en-Provence,13100.0,2021/04/30,0,0,0,0,0,0,0,5.398777,43.568308,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +344,109,SKLerner,,Eco-artiste,Avenue de la Timone 13010 Marseille,13010.0,2021/04/30,0,0,0,0,0,0,0,5.404582,43.286066,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +345,120,Ville d'Ensuès la Redonne,Commune,Collectivité territoriale,15 Avenue General de Monsabert 13820 Ensuès-la-Redonne,13820.0,2021/04/30,1,0,1,0,3,0,0,5.20578,43.356495,COMMUNE_0000000009760881,Ensuès-la-Redonne,13033,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +346,195,CCI Nice Côte 
D'Azur,,Organisation socioprofessionnelle,Quai Lunel 06300 Nice,6300.0,2021/04/29,1,0,1,0,1,0,0,7.28318,43.695346,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +347,75,Atelier Canopé 29,Autre,Association ou fédération,45 Rue de l?Elorn 29200 Brest,29200.0,2021/04/28,0,0,0,0,0,0,0,-4.476705,48.385361,COMMUNE_0000000009738584,Brest,29019,29,53,242900314,ME,Brest Métropole,Finistère,Bretagne +348,156,Ministère de la Transition Ecologique,,Services de l'état et établissements publics,Rue de la République 92800 Puteaux,92800.0,2021/04/21,0,0,0,0,0,0,0,2.236659,48.884435,COMMUNE_0000000009736050,Puteaux,92062,92,11,200054781,ME,Métropole du Grand Paris,Hauts-de-Seine,Île-de-France +349,298,LES SORGUES DU COMTAT,Communauté de communes,Collectivité territoriale,340 Boulevard d?Avignon 84170 Monteux,84170.0,2021/04/20,1,0,0,0,0,0,0,4.987264,44.032469,COMMUNE_0000000009758292,Monteux,84080,84,93,248400293,CA,CA des Sorgues du Comtat,Vaucluse,Provence-Alpes-Côte d'Azur +350,166,Jardinot PACA,Association Education environnement et Développement durable,Association ou fédération,Rue le Chatelier 13015 Marseille,13015.0,2021/04/20,0,0,1,1,3,0,0,5.366452,43.346279,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +351,29,"Service Environnement, Mairie de SAUSSET LES PINS",Commune,Collectivité territoriale,Place des Droits de l?Homme 13960 Sausset-les-Pins,13960.0,2021/04/15,0,0,1,1,0,0,0,5.109237,43.331774,COMMUNE_0000000009760882,Sausset-les-Pins,13104,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +352,110,Mairie de Bormes les Mimosas,Commune,Collectivité territoriale,Place Saint Francois 83230 Bormes-les-Mimosas,83230.0,2021/04/08,1,0,0,0,0,0,0,6.344588,43.150748,COMMUNE_0000000009761632,Bormes-les-Mimosas,83019,83,93,200027100,CC,CC Méditerranée Porte des 
Maures,Var,Provence-Alpes-Côte d'Azur +353,152,Scouts et Guides de France,Autre,Association ou fédération,"11, Impasse Flammarion, Saint-Charles, 1er Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13001, France",13001.0,2021/04/08,1,2,1,0,8,0,0,5.387775,43.303545,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +354,292,Horizons Naviguer et Partager,Association Education environnement et Développement durable,Association ou fédération,"Le 1901, Maison des associations, Rue Germaine Tillion, La Guérinière Ouest, Caen, Calvados, Normandie, France métropolitaine, 14000, France",14000.0,2021/04/07,1,0,1,0,0,2,8,-0.817184,46.093233,COMMUNE_0000000009750643,Saint-Pierre-La-Noue,17340,17,75,200041614,CC,CC Aunis Sud,Charente-Maritime,Nouvelle-Aquitaine +355,264,Les Ateliers Ecocitoyens,Association Education environnement et Développement durable,Association ou fédération,93 Voie la Canebiere 13001 Marseille,13001.0,2021/04/01,0,1,1,0,2,0,0,5.381708,43.298128,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +356,31,Festival de la Camargue,Autre,Association ou fédération,19 Avenue Pierre Gabrielli 13230 Port-Saint-Louis-du-Rhône,13230.0,2021/03/31,1,0,0,0,0,0,0,4.799466,43.395383,COMMUNE_0000000009760626,Port-Saint-Louis-du-Rhône,13078,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +357,170,Nettoyons Lyon,Association de protection de l'environnement,Association ou fédération,23 Rue du Plat 69002 Lyon,69002.0,2021/03/30,1,0,1,1,3,0,0,4.828536,45.75704,COMMUNE_0000000009752008,Lyon,69123,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +358,265,ligue sud provence alpes cote azur de voile,Association sportive,Association ou fédération,11 Avenue Leon Gambetta 83500 La 
Seyne-sur-Mer,83500.0,2021/03/25,0,0,0,0,0,0,0,5.879551,43.103189,COMMUNE_0000000009761869,La Seyne-sur-Mer,83126,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +359,286,Ecole Paul Langevin,École primaire,Établissement scolaire ou d'enseignement supérieur,Rue Joliot Curie les Vignes 83130 La Garde,83130.0,2021/03/22,0,1,0,0,0,0,0,5.98886,43.111476,COMMUNE_0000000009761866,La Garde,83062,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +360,262,Recyclo 1000,Association Education environnement et Développement durable,Association ou fédération,282 Route Departementale 96 13710 Fuveau,13710.0,2021/03/17,0,0,0,0,0,0,0,5.54652,43.462808,COMMUNE_0000000009760346,Fuveau,13040,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +361,196,US Valbonne,Association sportive,Association ou fédération,685 Route de Biot 06560 Valbonne,6560.0,2021/03/17,0,0,0,0,0,0,0,7.028203,43.642122,COMMUNE_0000000009759616,Valbonne,06152,06,93,240600585,CA,CA de Sophia Antipolis,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +362,85,Watertrek,Association Education environnement et Développement durable,Association ou fédération,Passage du Génie 75012 Paris,75012.0,2021/03/15,0,0,0,0,0,0,0,2.389387,48.848448,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +363,219,Trashbusters Pays de Grasse,Association de protection de l'environnement,Association ou fédération,101 Allée Charles Bonome 06460 Saint-Vallier-de-Thiey,6460.0,2021/03/06,1,0,0,0,0,0,0,6.848674,43.700356,COMMUNE_0000000009759150,Saint-Vallier-de-Thiey,06130,06,93,200039857,CA,CA du Pays de Grasse,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +364,161,Aix en Provence Plongée,Club affilié FFESSM,Association ou fédération,1209 Avenue Fortune Ferrini 13100 
Aix-en-Provence,13080.0,2021/03/06,1,1,1,2,0,0,0,5.434803,43.501554,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +365,295,Association des Usagers du Port de Meschers sur Gironde (AUPM),Autre,Association ou fédération,3 Route des Salines 17132 Meschers-sur-Gironde,17132.0,2021/03/06,1,0,0,0,0,0,0,-0.945405,45.555849,COMMUNE_0000000009752700,Meschers-sur-Gironde,17230,17,75,241700640,CA,CA Royan Atlantique,Charente-Maritime,Nouvelle-Aquitaine +366,136,Association Chers Voisins,Autre,Association ou fédération,Rue du Docteur Gariel 13360 Roquevaire,13360.0,2021/02/25,1,1,0,0,0,0,0,5.604786,43.349731,COMMUNE_0000000009760869,Roquevaire,13086,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +367,143,Orléans Zéro Plastique,Association de protection de l'environnement,Association ou fédération,5 Impasse Nicolas Poussin 45650 Saint-Jean-le-Blanc,45650.0,2021/02/24,1,0,0,0,0,0,0,1.926567,47.882686,COMMUNE_0000000009742917,Saint-Jean-le-Blanc,45286,45,24,244500468,ME,Orléans Métropole,Loiret,Centre-Val de Loire +368,20,La Consigne Maintenant,Association de protection de l'environnement,Association ou fédération,7 Rue Duvergier 75019 Paris,75019.0,2021/02/24,1,0,0,0,0,0,0,2.377283,48.889132,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +369,180,SOS Grand Bleu,Association de protection de l'environnement,Association ou fédération,Avenue Jean Mermoz 06230 Saint-Jean-Cap-Ferrat,6230.0,2021/02/23,1,0,0,0,0,0,0,7.336819,43.68804,COMMUNE_0000000009759139,Saint-Jean-Cap-Ferrat,06121,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +370,84,Les Ami.e.s des POsidonies,Association de protection de l'environnement,Association ou fédération,Avenue des Corbières 66540 
Baho,66540.0,2021/02/13,1,0,0,0,0,0,0,2.822295,42.703553,COMMUNE_0000000009763142,Baho,66012,66,76,200027183,CU,CU Perpignan Méditerranée Métropole,Pyrénées-Orientales,Occitanie +371,209,OTARY,Association de protection de l'environnement,Association ou fédération,25 Boulevard Paul Pons 84800 L'Isle-sur-la-Sorgue,84800.0,2021/02/05,1,0,1,1,0,0,0,5.048509,43.916183,COMMUNE_0000000009758729,L'Isle-sur-la-Sorgue,84054,84,93,248400319,CC,CC du Pays des Sorgues et des Monts de Vaucluse,Vaucluse,Provence-Alpes-Côte d'Azur +372,89,TRESOR DE NATURE,Association Education environnement et Développement durable,Association ou fédération,Les Gites 04120 Demandolx,4120.0,2021/01/25,1,0,0,0,0,0,0,6.576906,43.86999,COMMUNE_0000000009758702,Demandolx,04069,04,93,200068625,CC,CC Alpes-Provence-Verdon - Sources de Lumière,Alpes-de-Haute-Provence,Provence-Alpes-Côte d'Azur +373,6,Perennis,Association Education environnement et Développement durable,Association ou fédération,Boulevard de Chatenay 16100 Cognac,16100.0,2021/01/25,1,0,1,0,0,0,1,-0.31865,45.69924,COMMUNE_0000000009752420,Cognac,16102,16,75,200070514,CA,CA du Grand Cognac,Charente,Nouvelle-Aquitaine +374,164,Unité de Valorisation des Déchets,Association de protection de l'environnement,Association ou fédération,Rue d'Abidjan 47300 Villeneuve-sur-Lot,47300.0,2021/01/23,0,0,0,0,0,0,0,0.707531,44.420709,COMMUNE_0000000009757342,Villeneuve-sur-Lot,47323,47,75,200023307,CA,CA du Grand Villeneuvois,Lot-et-Garonne,Nouvelle-Aquitaine +375,37,Ville de Fréjus,Commune,Collectivité territoriale,Place Formige 83600 Fréjus,83600.0,2021/01/16,1,0,0,0,0,0,0,6.736646,43.432922,COMMUNE_0000000009760330,Fréjus,83061,83,93,200035319,CA,CA Estérel Côte d'Azur Agglomération,Var,Provence-Alpes-Côte d'Azur +376,67,Carqueiranne Var Basket,Association sportive,Association ou fédération,Allee des Grandes Vignes 83320 
Carqueiranne,83320.0,2021/01/15,1,0,1,0,1,0,0,6.072821,43.091804,COMMUNE_0000000009761864,Carqueiranne,83034,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +377,78,Sea What's Possible,Association de protection de l'environnement,Association ou fédération,126 Descente de Lerins 83600 Les Adrets-de-l'Estérel,83600.0,2021/01/12,1,0,1,0,3,0,0,6.838273,43.531496,COMMUNE_0000000009759863,Les Adrets-de-l'Estérel,83001,83,93,200035319,CA,CA Estérel Côte d'Azur Agglomération,Var,Provence-Alpes-Côte d'Azur +378,130,Collège Irène et Frédéric Joliot-Curie,Collège,Établissement scolaire ou d'enseignement supérieur,Avenue du Collège 83320 Carqueiranne,83320.0,2021/01/11,1,0,1,0,3,0,0,6.073316,43.092794,COMMUNE_0000000009761864,Carqueiranne,83034,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +379,59,Pierrefeu Terres de Partage,Association Education environnement et Développement durable,Association ou fédération,1 Rue Gabriel Péri 83390 Pierrefeu-du-Var,83390.0,2021/01/11,1,0,1,0,1,0,0,6.143147,43.227205,COMMUNE_0000000009761397,Pierrefeu-du-Var,83091,83,93,200027100,CC,CC Méditerranée Porte des Maures,Var,Provence-Alpes-Côte d'Azur +380,47,Association Milvi,Association Education environnement et Développement durable,Association ou fédération,24 Impasse Frédéric Mistral 13200 Arles,13200.0,2021/01/08,0,0,0,0,0,0,0,4.625737,43.676737,COMMUNE_0000000009760125,Arles,13004,13,93,241300417,CA,CA d'Arles-Crau-Camargue-Montagnette,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +381,294,Eaux Cristallines,Association de protection de l'environnement,Association ou fédération,121 Avenue Clot Bey 13008 Marseille,13008.0,2021/01/06,1,8,1,0,2,0,12,5.383363,43.257694,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +382,243,Mairie des Taillades,Commune,Collectivité territoriale,Place de la Mairie 84300 
Taillades,84300.0,2021/01/05,1,0,0,0,0,0,0,5.093132,43.833886,COMMUNE_0000000009758966,Taillades,84131,84,93,200040442,CA,CA Luberon Monts de Vaucluse,Vaucluse,Provence-Alpes-Côte d'Azur +383,181,SAINT PAUL DE VENCE,Commune,Collectivité territoriale,Place de la Mairie 06570 Saint-Paul-de-Vence,6570.0,2021/01/05,1,0,0,0,0,0,0,7.122282,43.697004,COMMUNE_0000000009759146,Saint-Paul-de-Vence,06128,06,93,240600585,CA,CA de Sophia Antipolis,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +384,41,Association HISA,Association de protection de l'environnement,Association ou fédération,65 Rue Saint-jean 33800 Bordeaux,33800.0,2021/01/04,1,0,0,0,0,0,0,-0.568361,44.825575,COMMUNE_0000000009755644,Bordeaux,33063,33,75,243300316,ME,Bordeaux Métropole,Gironde,Nouvelle-Aquitaine +385,116,Bleu gorgone,Association de protection de l'environnement,Association ou fédération,Rue Sainte-claire 06300 Nice,6300.0,2020/12/28,1,1,1,40,2,0,0,7.278374,43.698151,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +386,92,Valbonne,Commune,Collectivité territoriale,Place de l'Hotel de Ville 06560 Valbonne,6560.0,2020/12/25,1,0,1,3,0,0,0,7.007853,43.640824,COMMUNE_0000000009759616,Valbonne,06152,06,93,240600585,CA,CA de Sophia Antipolis,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +387,16,Club environnement lycée Montgrand,Lycée,Établissement scolaire ou d'enseignement supérieur,13 Rue Montgrand 13006 Marseille,13006.0,2020/12/05,1,1,1,0,1,0,0,5.378343,43.291568,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +388,238,Mordus Spearfishing,Association sportive,Association ou fédération,71 Chemin de la Paveigne 83200 Toulon,83200.0,2020/11/29,1,3,1,5,15,0,0,5.896601,43.145369,COMMUNE_0000000009761867,Toulon,83137,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +389,90,Ekkopol,,Organisation 
socioprofessionnelle,Route de Combau 83670 Pontevès,83670.0,2020/11/29,1,0,0,0,0,0,0,6.052787,43.572484,COMMUNE_0000000009759876,Pontevès,83095,83,93,200040202,CC,CC Provence Verdon,Var,Provence-Alpes-Côte d'Azur +390,11,ASSE SUBAQUATIQUE,Club affilié FFESSM,Association ou fédération,23 Avenue de Corinthe 13006 Marseille,13006.0,2020/11/29,1,0,1,1,0,0,0,5.390363,43.283436,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +391,108,Environat,Association Education environnement et Développement durable,Association ou fédération,"Rue des Vignes, Rouffiac, Saintes, Charente-Maritime, Nouvelle-Aquitaine, France métropolitaine, 17800, France",17800.0,2020/10/23,1,5,1,6,6,20,5,-0.486663,45.680596,COMMUNE_0000000009752426,Rouffiac,17304,17,75,200036473,CA,CA de Saintes,Charente-Maritime,Nouvelle-Aquitaine +392,22,S'PECE,Association de protection de l'environnement,Association ou fédération,1877 Les Espreveires 83136 Méounes-lès-Montrieux,83136.0,2020/10/17,1,0,1,0,2,0,0,5.967372,43.267579,COMMUNE_0000000009761146,Méounes-lès-Montrieux,83077,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +393,157,Un Océan de Vie Lyon,Association Education environnement et Développement durable,Association ou fédération,33 Rue Clément Michut 69100 Villeurbanne,69100.0,2020/10/15,1,0,1,0,1,0,0,4.881435,45.766363,COMMUNE_0000000009751760,Villeurbanne,69266,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +394,184,A.I.L GRANS,Association sportive,Association ou fédération,Place de la Grande Fontaine 13450 Grans,13450.0,2020/10/14,1,0,0,0,0,0,0,5.065115,43.609776,COMMUNE_0000000009759886,Grans,13044,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +395,271,Recyclop,Association Education environnement et Développement durable,Association ou fédération,41 Rue Jobin 13003 
Marseille,13003.0,2020/10/14,1,0,0,0,0,0,0,5.390367,43.309593,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +396,19,Union Calanques Littoral,Association de protection de l'environnement,Association ou fédération,Traverse des Baudillons 13013 Marseille,13013.0,2020/10/07,1,1,1,1,1,0,0,5.438596,43.327896,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +397,158,Marseille Sport Loisirs Culture,Association sportive,Association ou fédération,Allée Callelongue 13008 Marseille,13008.0,2020/09/24,1,0,0,0,0,0,0,5.390139,43.255086,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +398,101,Société Nautique Corniche,Societé nautique,Association ou fédération,"Traverse de la Fausse Monnaie, Endoume, 7e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13007, France",13007.0,2020/09/23,1,0,0,0,0,0,0,5.353031,43.280958,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +399,186,Association d'Entreprises des Bois de Grasse (EBG),,Organisation socioprofessionnelle,7 Avenue Michel Chevalier 06130 Grasse,6130.0,2020/09/17,1,1,1,0,3,0,0,6.937814,43.626358,COMMUNE_0000000009759383,Grasse,06069,06,93,200039857,CA,CA du Pays de Grasse,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +400,95,Centre socio-culturel de la Gavotte Peyret,Autre,Association ou fédération,La Gavotte Peyret 13240 Septèmes-les-Vallons,13240.0,2020/09/17,1,1,1,2,0,0,0,5.353457,43.38884,COMMUNE_0000000009760615,Septèmes-les-Vallons,13106,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +401,183,On sème pour demain,Association de protection de l'environnement,Association ou 
fédération,"Le Pradet, Toulon, Var, Provence-Alpes-Côte d'Azur, France métropolitaine, 83220, France",83220.0,2020/09/15,1,6,1,5,28,0,0,6.03091,43.106308,COMMUNE_0000000009761865,Le Pradet,83098,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +402,165,Association le Grand Bleu,Association sportive,Association ou fédération,Plage de l'Estaque 13016 Marseille,13016.0,2020/09/14,1,0,0,0,0,0,0,5.313404,43.360846,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +403,193,Groupe nautique de Callelongue,Societé nautique,Association ou fédération,Avenue des Pebrons 13008 Marseille,13008.0,2020/09/12,1,0,1,1,0,0,0,5.354681,43.213185,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +404,39,Association des Cabanonniers de la Calanque de Marseilleveyre,Autre,Association ou fédération,24 Rue Roux de Corse 13013 Marseille,13013.0,2020/09/11,1,1,1,1,2,0,0,5.410528,43.314997,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +405,175,dolphinclubofmaldorme snc,Association sportive,Association ou fédération,Anse de Maldorme 13007 Marseille,13007.0,2020/09/10,1,0,0,0,0,0,0,5.350471,43.280585,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +406,246,Dune Marseille,,Organisation socioprofessionnelle,Avenue de la Pointe Rouge 13008 Marseille,13008.0,2020/09/10,1,0,0,0,0,0,0,5.374647,43.247663,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +407,300,CNPRS (Club Nautique Provençal),Societé nautique,Association ou fédération,Avenue de la Pointe Rouge 13008 
Marseille,13008.0,2020/09/10,1,0,1,2,0,0,0,5.374647,43.247663,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +408,142,Lycée Thiers,Lycée,Établissement scolaire ou d'enseignement supérieur,5 Place du Lycee-albert Cohen 13001 Marseille,13001.0,2020/09/06,1,1,1,0,2,0,0,5.382614,43.296855,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +409,72,Lycée La Tourrache,Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,450 Avenue Francois Arago 83130 La Garde,83130.0,2020/08/24,1,0,0,0,0,0,0,6.04303,43.147317,COMMUNE_0000000009761866,La Garde,83062,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +410,172,IADYS,,Organisation socioprofessionnelle,La Plaine du Caire 13830 Roquefort-la-Bédoule,13830.0,2020/08/24,0,0,1,3,0,0,0,5.582291,43.255011,COMMUNE_0000000009761403,Roquefort-la-Bédoule,13085,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +411,173,Sentinelles de Rivières,Autre,Association ou fédération,190 Rue Fra Angelico 34000 Montpellier,34000.0,2020/08/04,1,0,1,0,1,0,0,3.90294,43.607052,COMMUNE_0000000009759901,Montpellier,34172,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +412,204,Mairie du Rayol-Canadel-Sur-Mer,Commune,Collectivité territoriale,Route Departementale 559 83820 Rayol-Canadel-sur-Mer,83820.0,2020/07/20,1,0,0,0,0,0,0,6.45433,43.15655,COMMUNE_0000000009761630,Rayol-Canadel-sur-Mer,83152,83,93,200036077,CC,CC du Golfe de Saint-Tropez,Var,Provence-Alpes-Côte d'Azur +413,21,Clean my Calanques,Association Education environnement et Développement durable,Association ou fédération,Boulevard Baptistin Cayol 13008 Marseille,13008.0,2020/07/09,1,0,1,1,7,0,0,5.384005,43.249465,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole 
d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +414,194,STE NAUTIQUE PORT CROISETTES,Societé nautique,Association ou fédération,Route de la Marronnaise Ou Mongeret Che 13008 Marseille,13008.0,2020/07/07,1,0,0,0,0,0,0,5.34373,43.21631,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +415,234,Association Marseillaise d'Apnée,Club affilié FFESSM,Association ou fédération,2 Rue de la Gorge 13007 Marseille,13007.0,2020/07/06,1,3,1,0,5,0,0,5.364508,43.286839,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +416,299,MEGAPTERA,Association de protection de l'environnement,Association ou fédération,23 Rue Alexandre Dumas 75011 Paris,75011.0,2020/07/02,1,0,0,0,0,0,0,2.391076,48.8533,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +417,224,Lycée François Raynouard,Lycée,Établissement scolaire ou d'enseignement supérieur,Av des Martyrs de la Resistance 83170 Brignoles,83170.0,2020/06/25,1,0,0,0,0,0,0,6.065807,43.411322,COMMUNE_0000000009760601,Brignoles,83023,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +418,106,Ecole élémentaire Paul Bert,École primaire,Établissement scolaire ou d'enseignement supérieur,Traverse de la Marine 13600 La Ciotat,13600.0,2020/06/25,1,0,0,0,0,0,0,5.604866,43.181562,COMMUNE_0000000009761644,La Ciotat,13028,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +419,28,La Ruche Solidaire,Autre,Association ou fédération,Rue Ducouédic 29200 Brest,29200.0,2020/06/24,0,0,0,0,0,0,0,-4.492347,48.385606,COMMUNE_0000000009738584,Brest,29019,29,53,242900314,ME,Brest Métropole,Finistère,Bretagne +420,74,LYCEE MISTRAL,Lycée,Établissement scolaire ou d'enseignement supérieur,Boulevard de Sainte Anne 13008 
Marseille,13008.0,2020/06/17,1,0,0,0,0,0,0,5.393131,43.257532,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +421,129,Delta France Associations,Autre,Association ou fédération,2 Rue Gustave Ricard 13006 Marseille,13006.0,2020/06/12,0,0,0,0,0,0,0,5.377061,43.29107,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +422,182,Opération Mer Propre,Association de protection de l'environnement,Association ou fédération,534 Route de Nice 06600 Antibes,6600.0,2020/06/10,1,0,1,14,0,0,0,7.123231,43.604687,COMMUNE_0000000009759614,Antibes,06004,06,93,240600585,CA,CA de Sophia Antipolis,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +423,138,SIVED NG,,Services de l'état et établissements publics,174 Route du Val 83170 Brignoles,83170.0,2020/06/08,1,0,0,0,0,0,0,6.069464,43.416025,COMMUNE_0000000009760601,Brignoles,83023,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +424,97,Subsea Tech,,Organisation socioprofessionnelle,Plage de l'Estaque 13016 Marseille,13016.0,2020/05/30,1,0,0,0,0,0,0,5.313404,43.360846,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +425,171,CDMM [Centre de Découverte Mer et Montagne),Association Education environnement et Développement durable,Association ou fédération,50 Boulevard Franck Pilatte 06300 Nice,6300.0,2020/05/26,0,0,1,2,0,0,0,7.290688,43.692424,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +426,27,Compagnons SJBS,Autre,Association ou fédération,70 Rue Falguière 75015 Paris,75015.0,2020/05/15,1,0,0,0,0,0,0,2.312836,48.840524,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +427,221,RIEM-Réseau Initiatives des Eco-explorateurs de 
la Mer,Association de protection de l'environnement,Association ou fédération,31 Rue Guillaume le Bartz 56000 Vannes,56000.0,2020/05/13,1,0,1,1,0,0,0,-2.779727,47.646311,COMMUNE_0000000009743709,Vannes,56260,56,53,200067932,CA,CA Golfe du Morbihan - Vannes Agglomération,Morbihan,Bretagne +428,283,Projet Azur,Association Education environnement et Développement durable,Association ou fédération,"Impasse Pignotte, Magnanen - Teinturiers, Quartier Nord Rocade, Avignon, Vaucluse, Provence-Alpes-Côte d'Azur, France métropolitaine, 84000, France",84000.0,2020/05/12,1,0,1,3,51,1,0,4.811869,43.948424,COMMUNE_0000000009758734,Avignon,84007,84,93,248400251,CA,CA du Grand Avignon (COGA),Vaucluse,Provence-Alpes-Côte d'Azur +429,257,MediSea,Association de protection de l'environnement,Association ou fédération,114 Avenue de la Californie 06200 Nice,6200.0,2020/04/28,1,0,1,1,0,0,0,7.23676,43.686729,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +430,149,Odysseus 3.1,Association de protection de l'environnement,Association ou fédération,31 Grande Rue de la Guillotière 69007 Lyon,69007.0,2020/04/13,1,0,0,0,0,0,0,4.844062,45.753987,COMMUNE_0000000009752008,Lyon,69123,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +431,263,Communauté de communes du Sisteronais-Buëch,Communauté de communes,Collectivité territoriale,Route d'Orpierre (Lagrand) 05300 Garde-Colombe,5300.0,2020/03/20,1,0,0,0,0,0,0,5.768375,44.343503,COMMUNE_0000000009757225,Garde-Colombe,05053,05,93,200068765,CC,CC du Sisteronais-Buëch,Hautes-Alpes,Provence-Alpes-Côte d'Azur +432,222,Lycée LP Brochier,Lycée,Établissement scolaire ou d'enseignement supérieur,9 Boulevard Mireille Lauze 13010 Marseille,13010.0,2020/03/04,1,0,0,0,0,0,0,5.403797,43.282242,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +433,53,Planete Biodiv,Association de 
protection de l'environnement,Association ou fédération,Voie la Canebiere 13001 Marseille,13001.0,2020/02/28,0,0,0,0,0,0,0,5.379726,43.297268,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +434,288,PLONGEE PASSION CARRY,,Organisation socioprofessionnelle,12 Bd du Lieutenant Jean Valensi 13620 Carry-le-Rouet,13620.0,2020/02/26,1,0,0,0,0,0,0,5.153878,43.331863,COMMUNE_0000000009761164,Carry-le-Rouet,13021,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +435,192,Université de Toulon,Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,Avenue de l'Université 83160 La Valette-du-Var,83160.0,2020/02/17,1,0,0,0,0,0,0,6.006115,43.136872,COMMUNE_0000000009761636,La Valette-du-Var,83144,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +436,114,CSN,Association sportive,Association ou fédération,Quai Jean Bouteille (plage) 11100 Narbonne,11100.0,2020/02/11,1,0,0,0,0,0,0,3.180262,43.170242,COMMUNE_0000000009761873,Narbonne,11262,11,76,241100593,CA,CA Le Grand Narbonne,Aude,Occitanie +437,68,CAP AU LARGE,Societé nautique,Association ou fédération,11 Chemin des Bouilles 34660 Cournonterral,34660.0,2020/02/11,1,0,0,0,0,0,0,3.708905,43.560946,COMMUNE_0000000009760137,Cournonterral,34088,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +438,117,LPO Occitanie,Association de protection de l'environnement,Association ou fédération,Lotissement les Cigales 34560 Villeveyrac,34560.0,2020/02/11,0,0,1,1,2,0,0,3.608292,43.50083,COMMUNE_0000000009760362,Villeveyrac,34341,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +439,230,Planète Mer,Association de protection de l'environnement,Association ou fédération,137 Avenue Clot Bey 13008 
Marseille,13008.0,2020/02/11,0,1,1,10,0,0,0,5.38279,43.256768,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +440,213,SauveTaMerveille,Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,413 Avenue Gaston Berger 13090 Aix-en-Provence,13090.0,2020/02/11,1,0,1,1,0,0,0,5.451572,43.513448,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +441,150,Institut Marin du Seaquarium,Association Education environnement et Développement durable,Association ou fédération,Avenue du Palais de la Mer 30240 Le Grau-du-Roi,30240.0,2020/02/03,1,0,1,0,24,0,0,4.144951,43.527472,COMMUNE_0000000009760353,Le Grau-du-Roi,30133,30,76,243000650,CC,CC Terre de Camargue,Gard,Occitanie +442,247,CSP Plongée Pertuis,Club affilié FFESSM,Association ou fédération,128 Rue du Stade 84120 Pertuis,84120.0,2020/02/02,1,0,1,0,1,0,0,5.505389,43.691171,COMMUNE_0000000009759641,Pertuis,84089,84,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Vaucluse,Provence-Alpes-Côte d'Azur +443,261,LPO Langevin,Lycée,Établissement scolaire ou d'enseignement supérieur,Boulevard de l'Europe 83500 La Seyne-sur-Mer,83500.0,2020/01/31,1,0,0,0,0,0,0,5.855307,43.11498,COMMUNE_0000000009761869,La Seyne-sur-Mer,83126,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +444,63,Mare Vivu,Association de protection de l'environnement,Association ou fédération,Pino,20228.0,2020/01/16,1,7,1,0,0,0,53,8.852079,41.988894,COMMUNE_0000000009763506,Peri,2A209,2A,94,242010056,CA,CA du Pays Ajaccien,Corse-du-Sud,Corse +445,227,Festival de plongée du Var Environnement,Association de protection de l'environnement,Association ou fédération,Boulevard du Port 83230 Bormes-les-Mimosas,83230.0,2020/01/16,1,0,1,5,9,0,0,6.36319,43.121752,COMMUNE_0000000009761632,Bormes-les-Mimosas,83019,83,93,200027100,CC,CC 
Méditerranée Porte des Maures,Var,Provence-Alpes-Côte d'Azur +446,62,Parc naturel régional de Camargue,Parc régional,Gestionnaire d'aire protégée,Route des Saintes Maries de la Mer 13200 Arles,13200.0,2020/01/16,0,0,1,0,0,0,1,4.574889,43.649686,COMMUNE_0000000009760125,Arles,13004,13,93,241300417,CA,CA d'Arles-Crau-Camargue-Montagnette,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +447,178,NaturDive,Association de protection de l'environnement,Association ou fédération,"1, Avenue des Broussailles, Hautes Vallergues, Cannes, Grasse, Alpes-Maritimes, Provence-Alpes-Côte d'Azur, France métropolitaine, 06110, France",6110.0,2020/01/16,0,0,1,0,0,0,6,7.010659,43.559545,COMMUNE_0000000009759860,Cannes,06029,06,93,200039915,CA,CA Cannes Pays de Lérins,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +448,42,Maison de l'estuaire,Réserve naturelle,Gestionnaire d'aire protégée,20 Rue Jean Caurret 76600 Le Havre,76600.0,2020/01/13,1,0,1,0,0,0,1,0.135017,49.485853,COMMUNE_0000000009731163,Le Havre,76351,76,28,200084952,CU,CU Le Havre Seine Métropole,Seine-Maritime,Normandie +449,103,Plastic Odyssey Expedition,Association de protection de l'environnement,Association ou fédération,4 Place Sadi Carnot 13002 Marseille,13002.0,2020/01/08,1,0,0,0,0,0,0,5.371935,43.299552,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +450,52,"Laboratoire Eau, Environnement et Systèmes Urbains (Leesu)",Université / IUT / BTS,Établissement scolaire ou d'enseignement supérieur,61 Avenue du Général de Gaulle 94000 Créteil,94010.0,2020/01/08,0,0,0,0,0,0,0,2.442388,48.788799,COMMUNE_0000000009737006,Créteil,94028,94,11,200054781,ME,Métropole du Grand Paris,Val-de-Marne,Île-de-France +451,290,Parc national de Port-Cros,Parc national,Gestionnaire d'aire protégée,181 Allee du Castel Sainte Claire 83400 Hyères,83400.0,2020/01/03,1,0,0,0,0,0,0,6.12502,43.120844,COMMUNE_0000000009761863,Hyères,83069,83,93,248300543,ME,Métropole 
Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +452,229,TÉO,Autre,Association ou fédération,Rue Alfred Kastler 17000 La Rochelle,17000.0,2019/12/24,1,0,0,0,0,0,0,-1.158847,46.139386,COMMUNE_0000000009750154,La Rochelle,17300,17,75,241700434,CA,CA de La Rochelle,Charente-Maritime,Nouvelle-Aquitaine +453,46,Nature Libre,Association de protection de l'environnement,Association ou fédération,19 Rue de Wicardenne 62200 Boulogne-sur-Mer,62200.0,2019/12/23,1,0,0,0,0,0,0,1.620302,50.728357,COMMUNE_0000000009727231,Boulogne-sur-Mer,62160,62,32,246200729,CA,CA du Boulonnais,Pas-de-Calais,Hauts-de-France +454,275,Les Mains Dans Le Sable,Association de protection de l'environnement,Association ou fédération,"Lorient, Morbihan, Bretagne, France métropolitaine, 56100, France",56100.0,2019/12/23,1,0,1,0,15,0,0,-3.38644,47.75964,COMMUNE_0000000009743037,Lorient,56121,56,53,200042174,CA,CA Lorient Agglomération,Morbihan,Bretagne +455,220,PolyGreen,Association de protection de l'environnement,Association ou fédération,Avenue de Luminy 13009 Marseille,13009.0,2019/12/19,1,0,0,0,0,0,0,5.435843,43.232668,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +456,168,Parc naturel régional de la Sainte-Baume,Parc régional,Gestionnaire d'aire protégée,Route de Nans 83640 Plan-d'Aups-Sainte-Baume,83640.0,2019/12/17,0,0,0,0,0,0,0,5.756996,43.335551,COMMUNE_0000000009760867,Plan-d'Aups-Sainte-Baume,83093,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +457,132,Zéro Déchet Nice,Association Education environnement et Développement durable,Association ou fédération,Place Masséna 06000 Nice,6000.0,2019/12/16,0,0,0,0,0,0,0,7.270284,43.697571,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +458,43,IRFSS CROIX-ROUGE FRANCAISE,Autre,Association ou fédération,32 Cours des Arts et Metiers 13100 
Aix-en-Provence,13100.0,2019/12/13,0,0,0,0,0,0,0,5.456922,43.531201,COMMUNE_0000000009760349,Aix-en-Provence,13001,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +459,25,DLV2030 (Durance Luberon Verdon),Association de protection de l'environnement,Association ou fédération,234 Chemin des Griottes 04100 Manosque,4100.0,2019/12/13,0,0,0,0,0,0,0,5.771268,43.822358,COMMUNE_0000000009758948,Manosque,04112,04,93,200034700,CA,CA Durance-Lubéron-Verdon Agglomération,Alpes-de-Haute-Provence,Provence-Alpes-Côte d'Azur +460,113,LYCEE PROFESSIONNEL AUGUSTE ESCOFFIER,Lycée,Établissement scolaire ou d'enseignement supérieur,Montee du Brecq 06800 Cagnes-sur-Mer,6800.0,2019/12/12,1,0,0,0,0,0,0,7.146583,43.670149,COMMUNE_0000000009759377,Cagnes-sur-Mer,06027,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +461,71,GAIA DIMENSION,Association Education environnement et Développement durable,Association ou fédération,Rue Saint-mayeul 04210 Valensole,4210.0,2019/11/21,1,0,1,1,2,0,0,5.983756,43.837762,COMMUNE_0000000009758947,Valensole,04230,04,93,200034700,CA,CA Durance-Lubéron-Verdon Agglomération,Alpes-de-Haute-Provence,Provence-Alpes-Côte d'Azur +462,287,Zéro Déchet Toulon,Association de protection de l'environnement,Association ou fédération,Place de la Liberte 83000 Toulon,83000.0,2019/11/13,0,0,0,0,0,0,0,5.930755,43.126396,COMMUNE_0000000009761867,Toulon,83137,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +463,115,Planète Sciences Méditerranée,Association de protection de l'environnement,Association ou fédération,6 Rue Louis Antelme 83500 La Seyne-sur-Mer,83500.0,2019/11/08,0,2,1,4,7,5,0,5.884833,43.099519,COMMUNE_0000000009761869,La Seyne-sur-Mer,83126,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +464,259,Mer Veille,Association de protection de l'environnement,Association ou fédération,"Rue du Docteur 
Frédéric Granier, Bompard, 7e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13007, France",13007.0,2019/11/07,1,1,1,0,44,0,1,5.358163,43.280926,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +465,80,Sea Optimism,Association de protection de l'environnement,Association ou fédération,4 Rue Lafon 13006 Marseille,13006.0,2019/10/30,1,0,1,1,0,0,0,5.381139,43.29125,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +466,237,La chouette liberté,Association Education environnement et Développement durable,Association ou fédération,6 Residence Sainte-lucie 83170 Brignoles,83170.0,2019/10/30,1,0,0,0,0,0,0,6.06678,43.413232,COMMUNE_0000000009760601,Brignoles,83023,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +467,23,Challenge zéro bouteille plastique,Autre,Association ou fédération,9 bis Rue du Prolétariat 13150 Tarascon,13150.0,2019/10/19,0,0,0,0,0,0,0,4.662406,43.806941,COMMUNE_0000000009759190,Tarascon,13108,13,93,241300417,CA,CA d'Arles-Crau-Camargue-Montagnette,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +468,244,Le Jardin de Noailles,Association de protection de l'environnement,Association ou fédération,6 Rue Rodolphe Pollak 13001 Marseille,13001.0,2019/10/19,0,0,0,0,0,0,0,5.379868,43.295504,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +469,4,FSN13 (Fédération des Sociétés Nautiques),Societé nautique,Association ou fédération,233 Corniche President John F Kennedy 13007 Marseille,13007.0,2019/10/18,1,0,0,0,0,0,0,5.351231,43.281861,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +470,281,C TO SEA,,Organisation socioprofessionnelle,18 
Rue du Moulin des Barres 44400 Rezé,44400.0,2019/10/16,0,0,0,0,0,0,0,-1.533886,47.170164,COMMUNE_0000000009746380,Rezé,44143,44,52,244400404,ME,Nantes Métropole,Loire-Atlantique,Pays de la Loire +471,151,Agence Régionale pour l'Environnement et la Bodiversité,Région,Collectivité territoriale,22 Rue Sainte Barbe 13001 Marseille,13002.0,2019/10/16,0,0,0,0,0,0,0,5.374564,43.300295,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +472,69,Communes de Roquevaire,Commune,Collectivité territoriale,368 Rue des Platrieres 13360 Roquevaire,13360.0,2019/10/16,0,0,0,0,0,0,0,5.610287,43.348303,COMMUNE_0000000009760869,Roquevaire,13086,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +473,82,Ecole Parette-Mazenode,École primaire,Établissement scolaire ou d'enseignement supérieur,427 Boulevard Mireille Lauze 13011 Marseille,13011.0,2019/10/08,1,0,0,0,0,0,0,5.430109,43.286446,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +474,273,Collège les Chatreux,Collège,Établissement scolaire ou d'enseignement supérieur,56 Avenue des Chartreux 13004 Marseille,13004.0,2019/10/07,1,0,0,0,0,0,0,5.398762,43.304536,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +475,270,Louis Aragon,Collège,Établissement scolaire ou d'enseignement supérieur,Avenue Elsa Triolet 13360 Roquevaire,13360.0,2019/10/07,1,0,1,2,0,0,0,5.602122,43.345286,COMMUNE_0000000009760869,Roquevaire,13086,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +476,228,Lycée technologique Frédéric Joliot-Curie,Lycée,Établissement scolaire ou d'enseignement supérieur,Avenue des Goums 13400 
Aubagne,13400.0,2019/10/03,1,0,1,0,1,0,0,5.561947,43.291686,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +477,254,Lycée des Calanques Marseille,Lycée,Établissement scolaire ou d'enseignement supérieur,Traverse Parangon 13008 Marseille,13008.0,2019/10/02,0,0,0,0,0,0,0,5.379556,43.2427,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +478,233,Ecole Saint Barnabé,École primaire,Établissement scolaire ou d'enseignement supérieur,27 Rue du Docteur Cauvin 13012 Marseille,13012.0,2019/10/02,1,0,0,0,0,0,0,5.417411,43.302425,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +479,256,AIAES Groupe Addap 13,Autre,Association ou fédération,15 Chemin des Jonquilles 13013 Marseille,13013.0,2019/10/02,1,0,0,0,0,0,0,5.42603,43.326751,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +480,112,Collège Nathalie Sarraute,Collège,Établissement scolaire ou d'enseignement supérieur,Route d'Eoures 13400 Aubagne,13400.0,2019/10/01,1,0,0,0,0,0,0,5.551625,43.306634,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +481,199,FAIL 13,Association Education environnement et Développement durable,Association ou fédération,192 Rue Horace Bertin 13005 Marseille,13005.0,2019/10/01,0,0,0,0,0,0,0,5.397218,43.296069,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +482,5,Lei barquieu,École primaire,Établissement scolaire ou d'enseignement supérieur,Route de Lascours 13360 
Roquevaire,13360.0,2019/10/01,1,0,0,0,0,0,0,5.593787,43.345208,COMMUNE_0000000009760869,Roquevaire,13086,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +483,279,HUNAMAR,Association de protection de l'environnement,Association ou fédération,2 Avenue du Vingt Quatre Avril 1915 13012 Marseille,13012.0,2019/10/01,1,0,1,1,0,0,0,5.434723,43.308157,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +484,26,ARFPPMA PACA,Association de protection de l'environnement,Association ou fédération,8 Zone d'Activite Bompertuis 13120 Gardanne,13120.0,2019/10/01,1,0,0,0,0,0,0,5.452703,43.45464,COMMUNE_0000000009760614,Gardanne,13041,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +485,44,Lycée Professionel Briand Aristide,Lycée,Établissement scolaire ou d'enseignement supérieur,Cours Aristide Briand 84100 Orange,84100.0,2019/09/30,0,0,0,0,0,0,0,4.805323,44.136535,COMMUNE_0000000009757941,Orange,84087,84,93,248400236,CC,CC Pays d'Orange en Provence,Vaucluse,Provence-Alpes-Côte d'Azur +486,153,Collège Seize Fontaines,Collège,Établissement scolaire ou d'enseignement supérieur,N 560 83640 Saint-Zacharie,83640.0,2019/09/28,1,0,0,0,0,0,0,5.694196,43.377627,COMMUNE_0000000009760868,Saint-Zacharie,83120,83,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Var,Provence-Alpes-Côte d'Azur +487,155,Mer-Nature,Association Education environnement et Développement durable,Association ou fédération,56 Rue Daillon 83000 Toulon,83000.0,2019/09/25,0,0,1,1,0,0,0,5.937669,43.109095,COMMUNE_0000000009761867,Toulon,83137,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +488,70,Déchet Zéro & co.,Association de protection de l'environnement,Association ou fédération,3 bis Place Evariste Gras 13600 La 
Ciotat,13600.0,2019/09/21,1,0,1,2,0,0,0,5.604384,43.175517,COMMUNE_0000000009761644,La Ciotat,13028,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +489,215,Association Les Amis de l'Huveaune,Association de protection de l'environnement,Association ou fédération,9 Montée des Gaulois 13011 Marseille,13011.0,2019/09/21,1,0,0,0,0,0,0,5.465331,43.283148,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +490,66,Les Cousardes,,Organisation socioprofessionnelle,18 Place de la Libération 83143 Le Val,83143.0,2019/09/19,0,0,0,0,0,0,0,6.072274,43.439217,COMMUNE_0000000009760340,Le Val,83143,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +491,302,Les Jardins des Savoirs de R.I.E.Z.,Association Education environnement et Développement durable,Association ou fédération,4 Avenue de Verdun 04500 Riez,4500.0,2019/09/18,1,0,1,0,4,0,0,6.090428,43.818221,COMMUNE_0000000009758945,Riez,04166,04,93,200034700,CA,CA Durance-Lubéron-Verdon Agglomération,Alpes-de-Haute-Provence,Provence-Alpes-Côte d'Azur +492,14,Explore & Preserve,Association de protection de l'environnement,Association ou fédération,Route des Vieux Salins 83400 Hyères,83400.0,2019/09/18,1,0,1,5,33,10,1,6.194555,43.115166,COMMUNE_0000000009761863,Hyères,83069,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +493,141,Planète Zéro Déchet,Association Education environnement et Développement durable,Association ou fédération,2 Rue Jacques Duclos 13740 Le Rove,13740.0,2019/09/17,0,0,1,0,3,0,0,5.251001,43.369791,COMMUNE_0000000009760879,Le Rove,13088,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +494,255,CPIE bassin de thau,Association de protection de l'environnement,Association ou fédération,60 Boulevard Victor Hugo 34110 
Frontignan,34110.0,2019/09/12,0,1,1,1,5,0,16,3.753862,43.446264,COMMUNE_0000000009760628,Frontignan,34108,34,76,200066355,CA,CA Sète Agglopôle Méditerranée,Hérault,Occitanie +495,217,FRANCE NATURE ENVIRONNEMENT VAUCLUSE,Association de protection de l'environnement,Association ou fédération,10 Boulevard du Nord 84200 Carpentras,84200.0,2019/09/11,1,0,1,7,1,0,0,5.047223,44.05668,COMMUNE_0000000009758290,Carpentras,84031,84,93,248400053,CA,CA Ventoux-Comtat-Venaissin (COVE),Vaucluse,Provence-Alpes-Côte d'Azur +496,119,Cékikifétou,,Eco-artiste,10 Rue Docteur Lachapelle 34080 Montpellier,34080.0,2019/09/10,0,0,0,0,0,0,0,3.829762,43.61414,COMMUNE_0000000009759901,Montpellier,34172,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +497,284,Les Petites choses - ressources créatives,Association Education environnement et Développement durable,Association ou fédération,14 Rue Leon Honore Labande 84000 Avignon,84000.0,2019/09/09,0,0,0,0,0,0,0,4.798322,43.937283,COMMUNE_0000000009758734,Avignon,84007,84,93,248400251,CA,CA du Grand Avignon (COGA),Vaucluse,Provence-Alpes-Côte d'Azur +498,205,Le Caméléon,Autre,Association ou fédération,32 Grande Rue 83570 Montfort-sur-Argens,83570.0,2019/09/06,1,0,0,0,0,0,0,6.122189,43.47291,COMMUNE_0000000009760339,Montfort-sur-Argens,83083,83,93,200068104,CA,CA de la Provence Verte,Var,Provence-Alpes-Côte d'Azur +499,36,Chercheurs en herbe,Association Education environnement et Développement durable,Association ou fédération,250 Avenue Franklin Roosevelt 83000 Toulon,83081.0,2019/09/04,1,6,1,3,96,3,0,5.938437,43.119558,COMMUNE_0000000009761867,Toulon,83137,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +500,248,Ecoscience Provence,Association de protection de l'environnement,Association ou fédération,724 Avenue des Berges 83170 Brignoles,83170.0,2019/08/29,0,0,1,0,1,0,0,6.054446,43.40783,COMMUNE_0000000009760601,Brignoles,83023,83,93,200068104,CA,CA de la Provence 
Verte,Var,Provence-Alpes-Côte d'Azur +501,203,CIETM,Association Education environnement et Développement durable,Association ou fédération,17 Rue Ernest Reyer 83400 Hyères,83400.0,2019/08/23,1,0,1,7,12,0,0,6.123736,43.115905,COMMUNE_0000000009761863,Hyères,83069,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +502,147,LABELBLEU,Association de protection de l'environnement,Association ou fédération,16 bis Avenue d'Assas 34000 Montpellier,34967.0,2019/08/06,1,0,1,9,12,0,0,3.866642,43.613628,COMMUNE_0000000009759901,Montpellier,34172,34,76,243400017,ME,Montpellier Méditerranée Métropole,Hérault,Occitanie +503,1,Groupement d'Intérêt Scientifique pour les Mammifères Marins de Méditerranée et leur environnement (GIS3M),Association de protection de l'environnement,Association ou fédération,1 Avenue Clément Monnier 13960 Sausset-les-Pins,13960.0,2019/08/02,0,0,0,0,0,0,0,5.111206,43.331094,COMMUNE_0000000009760882,Sausset-les-Pins,13104,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +504,45,École Joseph Martinat,École primaire,Établissement scolaire ou d'enseignement supérieur,20 Route du Pont de Garniere 13360 Roquevaire,13360.0,2019/08/01,1,1,0,0,0,0,0,5.601392,43.349044,COMMUNE_0000000009760869,Roquevaire,13086,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +505,99,Watch The Sea,Association de protection de l'environnement,Association ou fédération,61 Rue Breteuil 13006 Marseille,13006.0,2019/07/27,1,0,1,6,14,3,3,5.376137,43.289705,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +506,146,Wings of the Ocean,Association Education environnement et Développement durable,Association ou fédération,"Rue Amelot, Quartier de la Folie-Méricourt, Paris 11e Arrondissement, Paris, Île-de-France, France métropolitaine, 75011, 
France",75011.0,2019/07/26,1,5,1,15,145,233,4,2.371232,48.86485,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +507,131,Cercle des Nageurs de Marseille,Association sportive,Association ou fédération,Boulevard Charles Livon 13007 Marseille 7e Arrondissement,13007.0,2019/07/26,0,0,0,0,0,0,0,5.357774,43.291931,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +508,38,SailGP,,Organisation socioprofessionnelle,10 Rue de Penthievre 75008 Paris 8e Arrondissement,75008.0,2019/07/24,1,0,0,0,0,0,0,2.316605,48.873057,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +509,163,MOUNTAIN WILDERNESS FRANCE,Association de protection de l'environnement,Association ou fédération,5 Place Bir Hakeim 38000 Grenoble,38000.0,2019/07/16,0,0,1,0,7,0,0,5.737333,45.188556,COMMUNE_0000000009754185,Grenoble,38185,38,84,200040715,ME,Grenoble-Alpes-Métropole,Isère,Auvergne-Rhône-Alpes +510,188,SCOLAEarth,,Organisation socioprofessionnelle,41 Rue Gervais Bussière 69100 Villeurbanne,69100.0,2019/07/07,1,0,0,0,0,0,0,4.869828,45.773279,COMMUNE_0000000009751760,Villeurbanne,69266,69,84,200046977,METLYON,Métropole de Lyon,Rhône,Auvergne-Rhône-Alpes +511,242,Belle Île en Ville,Association de protection de l'environnement,Association ou fédération,Quai d'Honneur Île Ratonneau 13007 Marseille,13007.0,2019/07/02,1,0,1,1,0,0,0,5.368565,43.292701,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +512,88,1 déchet par jour,Association de protection de l'environnement,Association ou fédération,93 Voie la Canebiere 13001 Marseille,13001.0,2019/06/26,1,0,1,0,39,1,0,5.381708,43.298128,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +513,296,École 
primaire publique Hozier,École primaire,Établissement scolaire ou d'enseignement supérieur,2 Rue d'Hozier 13002 Marseille,13002.0,2019/06/14,0,0,0,0,0,0,0,5.370018,43.30445,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +514,214,Métropole Aix Marseille Provence,Communauté de communes,Collectivité territoriale,12 Rue Henri Barrelet 13700 Marignane,13700.0,2019/06/14,1,0,0,0,0,0,0,5.211168,43.417166,COMMUNE_0000000009760621,Marignane,13054,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +515,251,EPAGE HuCA,,Services de l'état et établissements publics,932 Avenue de la Fleuride 13400 Aubagne,13400.0,2019/06/12,1,0,1,31,0,0,0,5.596328,43.283857,COMMUNE_0000000009761151,Aubagne,13005,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +516,174,Marseille Vert,Association de protection de l'environnement,Association ou fédération,15 Impasse Figueroa 13008 Marseille,13008.0,2019/06/07,0,0,0,0,0,0,0,5.393325,43.277292,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +517,137,Eco Plongée La Londe,Association sportive,Association ou fédération,Place Carré du Port 83250 La Londe-les-Maures,83250.0,2019/06/07,1,0,0,0,0,0,0,6.246582,43.115998,COMMUNE_0000000009761633,La Londe-les-Maures,83071,83,93,200027100,CC,CC Méditerranée Porte des Maures,Var,Provence-Alpes-Côte d'Azur +518,239,collège Frédéric Mistral,Collège,Établissement scolaire ou d'enseignement supérieur,59 Avenue Yvonne Vittone 06200 Nice,6200.0,2019/06/07,0,0,0,0,0,0,0,7.212765,43.67327,COMMUNE_0000000009759142,Nice,06088,06,93,200030195,ME,Métropole Nice Côte d'Azur,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +519,35,Les excursionnistes marseillais,Association sportive,Association ou fédération,16 Rue de la Rotonde 13001 
Marseille,13001.0,2019/06/03,1,1,1,0,3,0,0,5.384937,43.301174,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +520,49,association naturiste phocéenne,Association sportive,Association ou fédération,16 Avenue Mistral 13009 Marseille,13009.0,2019/05/24,1,0,0,0,0,0,0,5.402109,43.267934,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +521,3,THEATRE DU CENTAURE,,Eco-artiste,2 Rue Marguerite de Provence 13009 Marseille,13009.0,2019/05/22,1,0,0,0,0,0,0,5.402218,43.237101,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +522,216,Septentrion Environnement,Association de protection de l'environnement,Association ou fédération,89 Traverse Parangon 13008 Marseille,13008.0,2019/05/21,0,0,1,1,1,0,0,5.379556,43.2427,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +523,293,Société Nautique de Couronne Vieille,Association de protection de l'environnement,Association ou fédération,La Couronne Vieille 13500 Martigues,13500.0,2019/05/20,1,0,1,1,1,0,0,5.054192,43.328869,COMMUNE_0000000009760884,Martigues,13056,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +524,24,Vivre Malmousque,Association de protection de l'environnement,Association ou fédération,4 Rue Montplaisir 13007 Marseille,13007.0,2019/05/20,1,0,0,0,0,0,0,5.349117,43.282507,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +525,289,SAINTE VICTOIRE INTERNATIONAL SCHOOL,Lycée,Établissement scolaire ou d'enseignement supérieur,Chemin de Maurel 13710 
Fuveau,13710.0,2019/05/20,1,0,0,0,0,0,0,5.597496,43.466244,COMMUNE_0000000009760346,Fuveau,13040,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +526,187,Citeo Région Sud Est,,Organisation socioprofessionnelle,1 Quai de la Joliette 13002 Marseille,13002.0,2019/05/15,0,0,0,0,0,0,0,5.365408,43.304402,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +527,10,Le Naturoscope,Association de protection de l'environnement,Association ou fédération,155 Avenue de Montredon 13008 Marseille,13008.0,2019/05/14,0,1,1,4,24,7,0,5.367024,43.24008,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +528,260,jefaismapart,Association Education environnement et Développement durable,Association ou fédération,Allée des Sources 83350 Ramatuelle,83350.0,2019/05/09,1,0,0,0,0,0,0,6.610131,43.21838,COMMUNE_0000000009761393,Ramatuelle,83101,83,93,200036077,CC,CC du Golfe de Saint-Tropez,Var,Provence-Alpes-Côte d'Azur +529,124,Parc national des Calanques,Parc national,Gestionnaire d'aire protégée,141 Avenue du Prado 13008 Marseille,13008.0,2019/05/07,0,1,1,2,37,1,0,5.386106,43.269435,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +530,86,RACE FOR PURE OCEAN,Association de protection de l'environnement,Association ou fédération,5 Allée de la Vigie 13620 Carry-le-Rouet,13620.0,2019/05/03,1,0,0,0,0,0,0,5.163959,43.328592,COMMUNE_0000000009761164,Carry-le-Rouet,13021,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +531,76,Team MALMOUSQUE,Autre,Association ou fédération,Anse de Malmousque 13007 Marseille,13007.0,2019/04/29,1,0,1,1,0,0,0,5.348499,43.282799,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole 
d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +532,232,Union Nautique de Port-Miou et des Calanques,Societé nautique,Association ou fédération,6 Rue Docteur Séverin Icard 13260 Cassis,13260.0,2019/04/28,1,0,1,2,0,0,0,5.538819,43.214593,COMMUNE_0000000009761406,Cassis,13022,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +533,241,Association Espace Plaine,Association Education environnement et Développement durable,Association ou fédération,Chemin de la Plaine 05400 La Roche-des-Arnauds,5400.0,2019/04/28,1,0,0,0,0,0,0,5.94855,44.563733,COMMUNE_0000000009756370,La Roche-des-Arnauds,05123,05,93,200067445,CC,CC Buëch-Dévoluy,Hautes-Alpes,Provence-Alpes-Côte d'Azur +534,225,SEA SHEPHERD MARSEILLE,Association de protection de l'environnement,Association ou fédération,22 Rue Boulard 75014 Paris,75014.0,2019/04/27,1,1,1,1,1,0,0,2.328806,48.833858,COMMUNE_0000000009736048,Paris,75056,75,11,200054781,ME,Métropole du Grand Paris,Paris,Île-de-France +535,185,collège Jean de la Fontaine,Collège,Établissement scolaire ou d'enseignement supérieur,Avenue César Baldaccini 13420 Gémenos,13420.0,2019/04/23,1,0,1,3,2,0,0,5.621134,43.288114,COMMUNE_0000000009761150,Gémenos,13042,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +536,226,Boud'mer,Association Education environnement et Développement durable,Association ou fédération,41 Rue Jobin 13003 Marseille,13003.0,2019/04/22,1,0,1,0,1,0,0,5.390367,43.309593,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +537,105,Jour de la Terre,Association de protection de l'environnement,Association ou fédération,18 Rue Guillot 92120 Montrouge,92120.0,2019/04/19,1,0,0,0,0,0,0,2.313032,48.815236,COMMUNE_0000000009736537,Montrouge,92049,92,11,200054781,ME,Métropole du Grand Paris,Hauts-de-Seine,Île-de-France +538,197,Voile Actée,Association 
de protection de l'environnement,Association ou fédération,Au Plot 38650 Saint-Michel-les-Portes,38650.0,2019/04/18,1,0,0,0,0,0,0,5.593433,44.869725,COMMUNE_0000000009755237,Saint-Michel-les-Portes,38429,38,84,200030658,CC,CC du Trièves,Isère,Auvergne-Rhône-Alpes +539,218,Atelier Bleu - CPIE Côte Provençale,Association Education environnement et Développement durable,Association ou fédération,250 Chemin de la Calanque du Mugel 13600 La Ciotat,13600.0,2019/04/18,0,2,1,2,39,8,0,5.605979,43.165411,COMMUNE_0000000009761644,La Ciotat,13028,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +540,77,Planète Perles,Association de protection de l'environnement,Association ou fédération,29 Rue Joliot Curie 13960 Sausset-les-Pins,13960.0,2019/04/17,1,2,1,7,9,0,0,5.133914,43.332509,COMMUNE_0000000009760882,Sausset-les-Pins,13104,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +541,50,Espace Jeunes municipal de Septèmes-les-Vallons,Commune,Collectivité territoriale,52 Avenue du 8 Mai 1945 13240 Septèmes-les-Vallons,13240.0,2019/04/16,1,0,0,0,0,0,0,5.366628,43.398539,COMMUNE_0000000009760615,Septèmes-les-Vallons,13106,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +542,30,Longitude 181 antenne PACA,Association de protection de l'environnement,Association ou fédération,270 Chemin de Saint-Marc 83000 Toulon,83000.0,2019/04/13,1,0,0,0,0,0,0,5.958191,43.11208,COMMUNE_0000000009761867,Toulon,83137,83,93,248300543,ME,Métropole Toulon-Provence-Méditerranée,Var,Provence-Alpes-Côte d'Azur +543,102,Mountain Riders,Association Education environnement et Développement durable,Association ou fédération,"180, Rue du Genevois, Parc d'activités de Côte-Rousse, Chambéry-le-Haut, Chambéry, Savoy, Auvergne-Rhône-Alpes, Metropolitan France, 73000, 
France",73000.0,2019/04/12,1,19,1,5,140,1,0,3.511426,50.332024,COMMUNE_0000000009727979,Aulnoy-lez-Valenciennes,59032,59,32,245901160,CA,CA Valenciennes Métropole,Nord,Hauts-de-France +544,7,conseil departemental des bouches du rhone,Département,Collectivité territoriale,52 Avenue de Saint-Just 13004 Marseille,13004.0,2019/04/09,0,0,0,0,0,0,0,5.403818,43.315419,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +545,297,Commune de Septèmes-les-Vallons,Commune,Collectivité territoriale,Place Pierre Didier Tramoni 13240 Septèmes-les-Vallons,13240.0,2019/04/08,1,0,0,0,0,0,0,5.366187,43.399139,COMMUNE_0000000009760615,Septèmes-les-Vallons,13106,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +546,121,Méditerranée 2000,Association de protection de l'environnement,Association ou fédération,29 Avenue des Cigales 06150 Cannes,6150.0,2019/04/04,1,0,0,0,0,0,0,6.968151,43.554323,COMMUNE_0000000009759860,Cannes,06029,06,93,200039915,CA,CA Cannes Pays de Lérins,Alpes-Maritimes,Provence-Alpes-Côte d'Azur +547,144,APIPR,Societé nautique,Association ou fédération,Port de la Pointe Rouge 13008 Marseille,13008.0,2019/04/02,1,0,0,0,0,0,0,5.374647,43.247663,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +548,2,ONF,,Services de l'état et établissements publics,Route Gaston Rebuffat 13009 Marseille,13009.0,2019/03/31,1,2,1,1,37,0,0,5.396854,43.267129,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +549,179,CIQ Samena,Comité d'interêt de quartier,Association ou fédération,Boulevard la Calanque de Samena 13008 Marseille,13008.0,2019/03/30,1,1,1,1,4,0,0,5.351524,43.228621,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole 
d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +550,200,Bureau des Guides du Gr 2013,Autre,Association ou fédération,152 La Canebière 13001 Marseille,13001.0,2019/03/28,0,0,0,0,0,0,0,5.384264,43.298553,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +551,57,CIQ Callelongue-Marseilleveyre,Comité d'interêt de quartier,Association ou fédération,25 Boulevard de Saïgon 13010 Marseille,13010.0,2019/03/27,1,0,0,0,0,0,0,5.433573,43.280598,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +552,73,EAU SECOURS,Association de protection de l'environnement,Association ou fédération,Chemin de la Craie 13190 Allauch,13190.0,2019/03/27,1,0,0,0,0,0,0,5.493141,43.326687,COMMUNE_0000000009760873,Allauch,13002,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +553,98,AESE,Association Education environnement et Développement durable,Association ou fédération,Carraire des Arlésiens 13240 Septèmes-les-Vallons,13240.0,2019/03/27,1,0,1,0,5,0,0,5.368576,43.411655,COMMUNE_0000000009760615,Septèmes-les-Vallons,13106,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +554,94,CAP Océans,Association de protection de l'environnement,Association ou fédération,"Chemin des Prés, Parignargues, Nîmes, Gard, Occitanie, France métropolitaine, 30730, France",30730.0,2019/03/27,1,0,1,4,1,2,1,4.208079,43.872607,COMMUNE_0000000009758992,Parignargues,30193,30,76,243000296,CC,CC du Pays de Sommières,Gard,Occitanie +555,18,AREMACS SUD EST,Association Education environnement et Développement durable,Association ou fédération,16 Rue du Génie 13003 Marseille,13003.0,2019/03/27,0,0,0,0,0,0,0,5.383235,43.307278,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole 
d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +556,276,SAFI,Autre,Association ou fédération,80 Rue Léon Bourgeois 13001 Marseille,13001.0,2019/03/27,0,0,0,0,0,0,0,5.393375,43.30199,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +557,8,Un Océan de Vie,Association de protection de l'environnement,Association ou fédération,8 Boulevard Pomeon 13009 Marseille,13009.0,2019/03/25,0,0,1,0,8,0,0,5.403566,43.243724,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +558,268,La Cité des Arts de la Rue,Autre,Association ou fédération,225 Avenue des Aygalades 13015 Marseille,13015.0,2019/03/22,1,1,1,0,8,0,0,5.366016,43.34514,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur +559,145,MerTerre,Association de protection de l'environnement,Association ou fédération,"68, Rue de Rome, Préfecture, 6e Arrondissement, Marseille, Bouches-du-Rhône, Provence-Alpes-Côte d'Azur, France métropolitaine, 13001, France",13001.0,2019/03/19,1,8,1,5,46,4,87,5.379877,43.292623,COMMUNE_0000000009761156,Marseille,13055,13,93,200054807,ME,Métropole d'Aix-Marseille-Provence,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur From bd525f61f220c4d8980994d19d1967571791c85b Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 18:32:10 +0200 Subject: [PATCH 093/147] [tg] - taille police graphs et corrections orth --- dashboards/app/pages/data.py | 48 ++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index bca28e2..2940ac1 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -268,6 +268,13 @@ def frenchify(x: int) -> str: 
["TYPE_MILIEU", "Volume"], ascending=False ) + # Raccourcir les étiquettes trop longues + df_typemilieu = df_typemilieu.replace( + { + "Zone naturelle ou rurale (hors littoral et montagne)": "Zone naturelle ou rurale" + } + ) + # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau fig3 = px.histogram( df_typemilieu, @@ -442,7 +449,7 @@ def frenchify(x: int) -> str: volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() cell6.metric( - "Volume de dechets collectés", frenchify(volume_total_filtered) + " litres" + "Volume de déchets collectés", frenchify(volume_total_filtered) + " litres" ) cell7.metric("Poids total collecté", frenchify(poids_total_filtered) + " kg") @@ -509,7 +516,7 @@ def frenchify(x: int) -> str: df_totals_sorted2, path=["Matériau"], values="Volume", - title="Répartition des matériaux en volume", + title="Répartition des matériaux en volume dans le milieu ou le lieu choisi", color="Matériau", color_discrete_map=colors_map, ) @@ -519,7 +526,7 @@ def frenchify(x: int) -> str: fig4.update_traces( textinfo="label+value", texttemplate="%{label}
%{value:.0f} litres", - textfont=dict(size=16), + textfont_size=16, hovertemplate="%{label}
Volume: %{value:.0f}", ) @@ -607,7 +614,7 @@ def frenchify(x: int) -> str: "categorie": "Dechet", "nb_dechet": "Nombre total de déchets (échelle logarithmique)", }, - title="Top 10 dechets ramassés (échelle logarithmique) ", + title="Top 10 des déchets ramassés", text="nb_dechet", color="Materiau", color_discrete_map=colors_map, @@ -960,14 +967,21 @@ def frenchify(x: int) -> str: x="Nombre de déchets", y="Secteur", color="Secteur", - title="Top 10 secteurs économiques identifiés dans les déchets comptés (échelle logarithmique)", + title="Top 10 des secteurs économiques identifiés dans les déchets comptés", + labels={ + "Nombre de déchets": "Nombre total de déchets (échelle logarithmique)", + }, orientation="h", color_discrete_map=colors_map_secteur, text_auto=True, ) # add log scale to x axis fig_secteur.update_layout(xaxis_type="log") - fig_secteur.update_traces(texttemplate="%{value:.0f}", textposition="inside") + fig_secteur.update_traces( + texttemplate="%{value:.0f}", + textposition="inside", + textfont_size=14, + ) fig_secteur.update_layout( height=500, uniformtext_mode="hide", @@ -981,8 +995,8 @@ def frenchify(x: int) -> str: if nb_vide_indetermine != 0: st.warning( "⚠️ Il y a " - + str(nb_vide_indetermine) - + " dechets dont le secteur n'a pas été determiné dans la totalité des dechets" + + str(frenchify(nb_vide_indetermine)) + + " déchets dont le secteur n'a pas été determiné dans les déchets collectés." 
) # Metriques et graphes marques @@ -1007,18 +1021,20 @@ def frenchify(x: int) -> str: top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), x="Nombre de déchets", y="Marque", - title="Top 10 des marques les plus ramassées (échelle logarithmique)", + title="Top 10 des marques identifiées dans les déchets comptés", + labels={ + "Nombre de déchets": "Nombre total de déchets (échelle logarithmique)", + }, color_discrete_sequence=["#1951A0"], orientation="h", text_auto=False, text=top_marque_df.tail(10)["Marque"] - + ": " + + " : " + top_marque_df.tail(10)["Nombre de déchets"].astype(str), ) # add log scale to x axis fig_marque.update_layout(xaxis_type="log") - # fig_marque.update_traces(texttemplate="%{value:.0f}", textposition="inside") - + fig_marque.update_traces(textfont_size=14) fig_marque.update_layout( height=500, uniformtext_minsize=8, @@ -1074,7 +1090,7 @@ def frenchify(x: int) -> str: top_rep_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), path=["Responsabilité élargie producteur"], values="Nombre de déchets", - title="Top 10 des Responsabilités élargies producteurs relatives aux dechets les plus ramassés", + title="Top 10 des filières REP relatives aux déchets les plus ramassés", color="Responsabilité élargie producteur", color_discrete_sequence=px.colors.qualitative.Set2, ) @@ -1091,12 +1107,12 @@ def frenchify(x: int) -> str: with st.container(border=True): st.plotly_chart(figreptree, use_container_width=True) - # Message d'avertissement Nombre de dechets dont la REP n'a pas été determine + # Message d'avertissement Nombre de déchets dont la REP n'a pas été determine if nb_vide_rep != 0: st.warning( "⚠️ Il y a " - + str(nb_vide_rep) - + " dechets dont la responsabilité producteur n'a pas été determiné dans la totalité des dechets comptabilisés" + + str(frenchify(nb_vide_rep)) + + " déchets dont la filière REP n'a pas été determinée dans les déchets collectés." 
) From 2b4035c59507e4307969b41ae8b2eb6bf02d4398 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 18:39:30 +0200 Subject: [PATCH 094/147] [tg] - update font size in charts --- dashboards/app/pages/data.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 2940ac1..335db50 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -299,7 +299,7 @@ def frenchify(x: int) -> str: texttemplate="%{y:.0f}%", textposition="inside", hovertemplate="%{x}
Part du volume collecté dans ce milieu: %{y:.0f} %", - textfont_size=10, + textfont_size=12, ) # Afficher le graphique @@ -642,7 +642,7 @@ def frenchify(x: int) -> str: # texttemplate="%{text:.2f}", textposition="inside", textfont_color="white", - textfont_size=20, + textfont_size=18, ) # Suppression de la colonne categorie @@ -983,7 +983,7 @@ def frenchify(x: int) -> str: textfont_size=14, ) fig_secteur.update_layout( - height=500, + height=700, uniformtext_mode="hide", showlegend=False, yaxis_title=None, @@ -1036,7 +1036,7 @@ def frenchify(x: int) -> str: fig_marque.update_layout(xaxis_type="log") fig_marque.update_traces(textfont_size=14) fig_marque.update_layout( - height=500, + height=700, uniformtext_minsize=8, uniformtext_mode="hide", yaxis_title=None, From b0fd915df89f695fb09117fb19353ac693f9c0cb Mon Sep 17 00:00:00 2001 From: Vincentdata Date: Wed, 24 Apr 2024 18:49:15 +0200 Subject: [PATCH 095/147] fixation bug --- dashboards/app/pages/data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 335db50..21eb522 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -881,7 +881,7 @@ def frenchify(x: int) -> str: if "VIDE" in top_secteur_df["Secteur"].unique(): df_vide_indetermine = top_secteur_df[top_secteur_df["Secteur"] == "VIDE"] nb_vide_indetermine = df_vide_indetermine["Nombre de déchets"].sum() - elif "INDÉTERMINÉ" in secteur_df["Secteur"].unique(): + elif "INDÉTERMINÉ" in top_secteur_df["Secteur"].unique(): df_vide_indetermine = top_secteur_df[ top_secteur_df["Secteur"] == "INDÉTERMINÉ" ] From 503e997a7b6e8ff77e62db8445220f539e991dee Mon Sep 17 00:00:00 2001 From: linh dinh Date: Wed, 24 Apr 2024 19:18:45 +0200 Subject: [PATCH 096/147] Ajoute filres pour carte densite --- dashboards/app/pages/hotspots.py | 280 +++++++++++++++++++++---------- 1 file changed, 193 insertions(+), 87 deletions(-) diff --git a/dashboards/app/pages/hotspots.py 
b/dashboards/app/pages/hotspots.py index a630df4..f694436 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -124,6 +124,8 @@ st.set_page_config( page_title="Hotspots", layout="wide", initial_sidebar_state="expanded" ) +# Tab title +st.markdown("""# 🔥 Hotspots : **Quelles sont les zones les plus impactées ?**""") # Execute code page if the authentication was complete if st.session_state["authentication_status"]: @@ -359,10 +361,6 @@ def construct_admin_lvl_boundaries( # 2/ Hotspot tab # ################## -# Tab title -st.markdown("""# 🔥 Hotspots : **Quelles sont les zones les plus impactées ?**""") - - ######################################################## # 2.1/ Carte densité de déchets sur les zones étudiées # ######################################################## @@ -434,105 +432,172 @@ def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicato def couleur_milieu(type): return couleur.get(type, "white") # Returns 'white' if the type is not found +def update_lieu_options(selected_milieu): + if selected_milieu and selected_milieu != "Sélectionnez un milieu...": + filtered_data = data_zds[data_zds['TYPE_MILIEU'] == selected_milieu] + return ["Sélectionnez un lieu..."] + list(filtered_data['TYPE_LIEU2'].dropna().unique()) + return ["Sélectionnez un lieu..."] # Function to plot a density map def plot_density_map( data_zds: pd.DataFrame, - region_geojson_path: str, + filtered_data: pd.DataFrame ) -> folium.Map: + # Check if the primary dataset is empty if data_zds.empty: st.write("Aucune donnée disponible pour la région sélectionnée.") # Initialize a basic map without any data-specific layers m = folium.Map(location=[46.6358, 2.5614], zoom_start=5) else: - # Calculate density - data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] - data_zds = data_zds[ - data_zds["DENSITE"] < 20 - ] # Remove rows with anomalously high density values + # Use filtered data if available; otherwise, use the 
full dataset + if filtered_data.empty: + map_data = data_zds + else: + map_data = filtered_data + + # Ensure the surface area is not zero to avoid division by zero + map_data = map_data[map_data['SURFACE'] > 0] + + # Calculate density + map_data["DENSITE"] = map_data["VOLUME_TOTAL"] / map_data["SURFACE"] + map_data = map_data[map_data["DENSITE"] < 20] # Remove rows with anomalously high density values + + # Round density values for display + map_data["DENSITE"] = map_data["DENSITE"].round(4) + # Round surface values for display + map_data["SURFACE_ROND"] = map_data["SURFACE"].round(2) + + # Initialize a map centered at the mean coordinates of locations + if not map_data[['LIEU_COORD_GPS_Y', 'LIEU_COORD_GPS_X']].dropna().empty: + m = folium.Map( + location=[ + map_data["LIEU_COORD_GPS_Y"].mean(), + map_data["LIEU_COORD_GPS_X"].mean(), + ], + zoom_start=12 + ) - # Round density values for display - data_zds["DENSITE"] = data_zds["DENSITE"].round(4) - # Round surface values for display - data_zds["SURFACE_ROND"] = data_zds["SURFACE"].round(2) - - # Initialize a map centered at the mean coordinates of locations - m = folium.Map( - location=[ - data_zds["LIEU_COORD_GPS_Y"].mean(), - data_zds["LIEU_COORD_GPS_X"].mean(), - ] - ) + # Loop over each row in the DataFrame to place markers + for _, row in map_data.iterrows(): + if pd.notna(row['LIEU_COORD_GPS_Y']) and pd.notna(row['LIEU_COORD_GPS_X']): + popup_html = f""" +
+

Densité: {row['DENSITE']} L/m²

+

Volume total : {row['VOLUME_TOTAL']} litres

+

Surface total : {row['SURFACE_ROND']} m²

+

Type de milieu : {row['TYPE_MILIEU']}

+

Type de lieu : {row['TYPE_LIEU']}

+
+ """ + color = couleur_milieu(row["TYPE_MILIEU"]) + folium.CircleMarker( + location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], + radius=np.log(row["DENSITE"] + 1) * 15, + popup=folium.Popup(popup_html, max_width=300), + color=color, + fill=True, + ).add_to(m) + + # Display the map in Streamlit + st_folium(m, width='100%', height=600) # Adjust width and height as needed + else: + st.write("Aucune donnée de localisation valide à afficher sur la carte.") - # Loop over each row in the DataFrame to place markers - for index, row in data_zds.iterrows(): - popup_html = f""" -
-

Densité: {row['DENSITE']} L/m²

-

Volume total : {row['VOLUME_TOTAL']} litres

-

Surface total : {row['SURFACE_ROND']} m²

-

Type de milieu : {row['TYPE_MILIEU']}

-

Type de lieu : {row['TYPE_LIEU']}

-
- """ - lgd_txt = '{txt}' - color = couleur_milieu(row["TYPE_MILIEU"]) - folium.CircleMarker( - fg=folium.FeatureGroup( - name=lgd_txt.format(txt=["TYPE_MILIEU"], col=color) - ), - location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], - radius=np.log(row["DENSITE"] + 1) * 15, - popup=folium.Popup(popup_html, max_width=300), - color=color, - fill=True, - ).add_to(m) - - folium_static(m) +# Function for 'milieu' density table +def density_table_milieu( + data_zds: pd.DataFrame, + filtered_data: pd.DataFrame +): + if data_zds.empty: + st.write("Aucune donnée disponible pour la région sélectionnée.") -# Function for 'milieu' density table + else: + # Use filtered data if available; otherwise, use the full dataset + if filtered_data.empty: + table_data = data_zds + else: + table_data = filtered_data + # Calculate density + table_data["DENSITE"] = table_data["VOLUME_TOTAL"] / table_data["SURFACE"] + # Remove rows with anomalously high density values + table_data = table_data[table_data["DENSITE"] < 20] + + # Group by 'TYPE_MILIEU', calculate mean density, sort, and round the density + table_milieu = ( + table_data.groupby("TYPE_MILIEU")["DENSITE"] + .mean() + .reset_index() + .sort_values(by="DENSITE", ascending=False) + ) + table_milieu["DENSITE"] = table_milieu["DENSITE"].round(4) + + st.dataframe( + table_milieu, + column_order=("TYPE_MILIEU", "DENSITE"), + hide_index=True, + width=800, + column_config={ + "TYPE_MILIEU": st.column_config.TextColumn( + "Milieu", + ), + "DENSITE": st.column_config.NumberColumn( + "Densité (L/m²)", + format="%f", + min_value=0, + max_value=max(table_milieu["DENSITE"]), + ), + }, + ) -def density_table(data_zds: pd.DataFrame): +def density_table_lieu( + data_zds: pd.DataFrame, + filtered_data: pd.DataFrame +): if data_zds.empty: st.write("Aucune donnée disponible pour la région sélectionnée.") else: - # Calculate density - data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] - # Remove rows with anomalously high 
density values - data_zds = data_zds[data_zds["DENSITE"] < 20] - - # Group by 'TYPE_MILIEU', calculate mean density, sort, and round the density - table_milieu = ( - data_zds.groupby("TYPE_MILIEU")["DENSITE"] - .mean() - .reset_index() - .sort_values(by="DENSITE", ascending=False) - ) - table_milieu["DENSITE"] = table_milieu["DENSITE"].round(4) - - st.dataframe( - table_milieu, - column_order=("TYPE_MILIEU", "DENSITE"), - hide_index=True, - width=800, - column_config={ - "TYPE_MILIEU": st.column_config.TextColumn( - "Milieu", - ), - "DENSITE": st.column_config.NumberColumn( - "Densité (L/m²)", - format="%f", - min_value=0, - max_value=max(table_milieu["DENSITE"]), - ), - }, - ) + # Use filtered data if available; otherwise, use the full dataset + if filtered_data.empty: + table_data = data_zds + else: + table_data = filtered_data + # Calculate density + table_data["DENSITE"] = table_data["VOLUME_TOTAL"] / table_data["SURFACE"] + # Remove rows with anomalously high density values + table_data = table_data[table_data["DENSITE"] < 20] + + # Group by 'TYPE_MILIEU', calculate mean density, sort, and round the density + table_lieu = ( + table_data.groupby("TYPE_LIEU2")["DENSITE"] + .mean() + .reset_index() + .sort_values(by="DENSITE", ascending=False) + ) + table_lieu["DENSITE"] = table_lieu["DENSITE"].round(4) + + st.dataframe( + table_lieu, + column_order=("TYPE_LIEU2", "DENSITE"), + hide_index=True, + width=800, + column_config={ + "TYPE_LIEU2": st.column_config.TextColumn( + "Milieu", + ), + "DENSITE": st.column_config.NumberColumn( + "Densité (L/m²)", + format="%f", + min_value=0, + max_value=max(table_lieu["DENSITE"]), + ), + }, + ) ################################ @@ -675,7 +740,7 @@ def create_contributors_table(data_zds: pd.DataFrame, multi_filter_dict: dict) - # Dashboard Main Panel # ######################## -tab1, tab2 = st.tabs(["Densité des déchets dans zone étudié", "Spots Adoptés"]) +tab1, tab2 = st.tabs(["Densité des déchets dans zone étudié🔍", "Spots 
Adoptés📍"]) with tab1: @@ -694,16 +759,57 @@ def create_contributors_table(data_zds: pd.DataFrame, multi_filter_dict: dict) - st.markdown("---") - left_column, right_column = st.columns([2, 1]) + left_column, right_column = st.columns([2, 2]) with left_column: - st.markdown("### Carte des Densités") - plot_density_map(data_zds_correct, NIVEAUX_ADMIN_GEOJSON_PATH_DICT["Région"]) - + # Add a default "Select a milieu..." option + selected_milieu = st.selectbox( + "Sélectionnez un milieu:", + ["Sélectionnez un milieu..."] + list(pd.unique(data_zds_correct['TYPE_MILIEU'])) + ) with right_column: - st.markdown("### Tableau des Densités par Milieu") - density_table(data_zds_correct) + # Update lieu options based on selected milieu + lieu_options = update_lieu_options(selected_milieu) + selected_lieu = st.selectbox("Sélectionnez un lieu:", lieu_options) + + st.markdown("### Carte des Densités") + # Automatically update the map based on the current selection + if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": + filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + plot_density_map(data_zds_correct, filtered_data) + else: + # Optionally show the map with all data or display a message + plot_density_map(data_zds_correct, data_zds_correct) # Show all data by default + col1, col2, col3 = st.columns([3, 3, 2]) + + with col1: + st.markdown("#### Tableau des Densités par Milieu") + if selected_milieu != "Sélectionnez un milieu..." 
and selected_lieu != "Sélectionnez un lieu...": + filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + density_table_milieu(data_zds_correct, filtered_data) + else: + density_table_milieu(data_zds_correct, data_zds_correct) + + with col2: + st.markdown("#### Tableau des Densités par Lieu") + if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": + filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + density_table_lieu(data_zds_correct, filtered_data) + else: + density_table_lieu(data_zds_correct, data_zds_correct) + + with col3: + with st.expander("###### Notice ℹ️", expanded=True): + st.write( + """ + **Milieu** désigne de grands types d'environnements comme le Littoral, + les Cours d'eau ou la Montagne.\n + Chaque Milieu est ensuite divisé en + **Lieux** plus spécifiques. Par exemple, sous le Milieu Littoral, + on trouve des Lieux comme les Plages, les Roches, les Digues, ou les Parkings. 
+ """ + ) with tab2: # Use the selected filters From 61c4bbeb3732b1742671c9fcb764f8a663831da7 Mon Sep 17 00:00:00 2001 From: Floriane Duccini Date: Wed, 24 Apr 2024 19:20:05 +0200 Subject: [PATCH 097/147] taking into account new structure file --- dashboards/app/home.py | 18 +++++--- dashboards/app/pages/structures.py | 68 ++++++++++-------------------- 2 files changed, 36 insertions(+), 50 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index d42cf40..b5beb2a 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -30,12 +30,17 @@ def load_df_other() -> pd.DataFrame: # Table des structures @st.cache_data def load_structures() -> pd.DataFrame: - return pd.read_excel( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/export_structures_29022024%20(1).xlsx", + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/4-" + "onglet-structures/Exploration_visuali" + "sation/data/structures_export_cleaned.csv", index_col=0, ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["dep"] + " - " + df["departement"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["COMMUNE"] + return df # Appel des fonctions pour charger les données @@ -103,7 +108,10 @@ def load_structures() -> pd.DataFrame: st.session_state["df_other_filtre"] = df_other_filtre # Filtrer dataframe structures et enregistrer dans le session.state - st.session_state["structures"] = df_structures + df_structures_filtre = df_structures[ + df_other[colonne_filtre] == select_collectivite + ] + st.session_state["structures_filtre"] = df_structures_filtre # Filtrer et enregistrer le dataframe nb_dechets dans session.State # Récuperer la liste des relevés diff --git a/dashboards/app/pages/structures.py 
b/dashboards/app/pages/structures.py index 3e160ae..20614ef 100644 --- a/dashboards/app/pages/structures.py +++ b/dashboards/app/pages/structures.py @@ -23,7 +23,7 @@ # Appeler les dataframes filtrés depuis le session state -if "structures" not in st.session_state: +if "structures_filtre" not in st.session_state: st.write( """ ### :warning: Merci de sélectionner une collectivité\ @@ -32,46 +32,20 @@ ) st.stop() else: - df_structures = st.session_state["structures"] - -# # df_nb_dechet = pd.read_csv( -# # ( -# # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" -# # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" -# # "sation/data/data_releve_nb_dechet.csv" -# # ) -# # ) - -# # df_other = pd.read_csv( -# # ( -# # "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" -# # "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" -# # "sation/data/data_zds_enriched.csv" -# # ) -# # ) - -# # res_aggCategory_filGroup = duckdb.query( -# # ( -# # "SELECT categorie, sum(nb_dechet) AS total_dechet " -# # "FROM df_nb_dechet " -# # "WHERE type_regroupement = 'GROUPE' " -# # "GROUP BY categorie " -# # "HAVING sum(nb_dechet) > 10000 " -# # "ORDER BY total_dechet DESC;" -# # ) -# # ).to_df() - -# # st.bar_chart(data=res_aggCategory_filGroup, x="categorie", y="total_dechet") - -# st.altair_chart( -# alt.Chart(res_aggCategory_filGroup) -# .mark_bar() -# .encode( -# x=alt.X("categorie", sort=None, title=""), -# y=alt.Y("total_dechet", title="Total de déchet"), -# ), -# use_container_width=True, -# ) + df_structures = st.session_state["structures_filtre"] + +# Appeler les dataframes filtrés depuis le session state +if "df_other_filtre" not in st.session_state: + st.write( + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home pour afficher les données. 
:warning: + """ + ) + st.stop() +else: + df_releves = st.session_state["df_other_filtre"] + if filtre_niveau == "" and filtre_collectivite == "": st.write("Aucune sélection de territoire n'a été effectuée") @@ -92,7 +66,7 @@ # 2ème métrique : nb de spots adoptés cell2 = l1_col2.container(border=True) -nb_spots_adoptes = df_structures["A1S_NB_SPOTS_ADOPTES"].sum() +nb_spots_adoptes = df_structures["A1S_NB_SPO"].sum() cell2.metric("Spots adoptés", nb_spots_adoptes) @@ -148,9 +122,13 @@ st.markdown(""" **Structures du territoire**""") df_struct_simplifie = duckdb.query( ( - "SELECT NOM as Nom, TYPE, ACTION_RAMASSAGE AS 'Nombre de collectes', A1S_NB_SPOTS_ADOPTES as 'Nombre de spots adoptés' " - "FROM df_structures " - "ORDER BY Nom DESC;" + """SELECT + NOM_structure as Nom, + TYPE, + ACTION_RAM AS 'Nombre de collectes', + A1S_NB_SPO as 'Nombre de spots adoptés' + FROM df_structures + ORDER BY Nom DESC;""" ) ).to_df() From 4c3ad30066311a90d0260dfebb0ae380e89b6198 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 19:45:40 +0200 Subject: [PATCH 098/147] [tg] - V1 folium map with structures --- dashboards/app/pages/structures.py | 53 ++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/dashboards/app/pages/structures.py b/dashboards/app/pages/structures.py index 20614ef..c666cb2 100644 --- a/dashboards/app/pages/structures.py +++ b/dashboards/app/pages/structures.py @@ -1,8 +1,9 @@ import streamlit as st -import altair as alt import duckdb import pandas as pd import plotly.express as px +import folium +from folium import IFrame # Configuration de la page @@ -116,6 +117,53 @@ with st.container(): st.markdown(""" **Cartographie des structures du territoire**""") + # Création de la carte centrée autour d'une localisation + # Initialisation du zoom sur la carte + if filtre_niveau == "Commune": + zoom_admin = 12 + elif filtre_niveau == "EPCI": + zoom_admin = 13 + elif 
filtre_niveau == "Département": + zoom_admin = 10 + else: + zoom_admin = 8 + + # Calcul des limites à partir de vos données + min_lat = df_structures["latitude"].min() + max_lat = df_structures["latitude"].max() + min_lon = df_structures["longitude"].min() + max_lon = df_structures["longitude"].max() + + map_data = folium.Map( + location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], + zoom_start=zoom_admin, + # zoom_start=8, + tiles="OpenStreetMap", + ) + + # Facteur de normalisation pour ajuster la taille des bulles + normalisation_facteur = 1000 + + for index, row in df_structures.iterrows(): + # Application de la normalisation + + # Application d'une limite minimale pour le rayon si nécessaire + + folium.Marker( + location=(row["latitude"], row["longitude"]), + color="#3186cc", + icon=folium.Icon(color="blue"), + popup=folium.Popup( + f"{row['NOM_structure']}\n ({row['COMMUNE']})", max_width=100 + ), + ).add_to(map_data) + + # Affichage de la carte Folium dans Streamlit + st_folium = st.components.v1.html + st_folium( + folium.Figure().add_child(map_data).render(), # , width=1400 + height=750, + ) # Affichage du dataframe with st.container(): @@ -123,7 +171,8 @@ df_struct_simplifie = duckdb.query( ( """SELECT - NOM_structure as Nom, + NOM_structure as Nom, + COMMUNE, TYPE, ACTION_RAM AS 'Nombre de collectes', A1S_NB_SPO as 'Nombre de spots adoptés' From 7ef6a61b93e05eadb3fc430ee52943f04af5ed20 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 19:57:45 +0200 Subject: [PATCH 099/147] [tg] adapt home.py for auth --- dashboards/app/.credentials.yml | 14 ++ dashboards/app/home.py | 281 ++++++++++++++++++++------------ 2 files changed, 187 insertions(+), 108 deletions(-) create mode 100644 dashboards/app/.credentials.yml diff --git a/dashboards/app/.credentials.yml b/dashboards/app/.credentials.yml new file mode 100644 index 0000000..d66d7ae --- /dev/null +++ b/dashboards/app/.credentials.yml @@ 
-0,0 +1,14 @@ +cookie: + expiry_days: 30 + key: some_signature_key + name: some_cookie_name +credentials: + usernames: + test: + email: test@test.com + logged_in: false + name: test + password: $2b$12$fR4sp7tIG.dbeusbr695MOw/xvN1sf.21rML7t7j9pCdIVREIocUO +pre-authorized: + emails: + - test@test.com \ No newline at end of file diff --git a/dashboards/app/home.py b/dashboards/app/home.py index b5beb2a..98c9d11 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,127 +1,192 @@ +from pathlib import Path + import pandas as pd import streamlit as st +import streamlit_authenticator as stauth +import yaml +from st_pages import Page, show_pages +from yaml.loader import SafeLoader + +# Configuration de la page +st.set_page_config( + layout="wide", + page_title="Dashboard Zéro Déchet Sauvage", + page_icon=":dolphin:", + menu_items={ + "About": "https://www.zero-dechet-sauvage.org/", + }, +) + st.markdown( """ # Bienvenue 👋 -#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! +#### Visualisez les collectes de déchets qui ont lieu sur votre territoire ! 
""", ) -st.markdown("""# À propos""") +# Login +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) -# Chargement des données et filtre géographique à l'arrivée sur le dashboard -# Table des volumes par matériaux -@st.cache_data -def load_df_other() -> pd.DataFrame: - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv", - ) - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE - # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - return df - - -# Table des structures -@st.cache_data -def load_structures() -> pd.DataFrame: - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/4-" - "onglet-structures/Exploration_visuali" - "sation/data/structures_export_cleaned.csv", - index_col=0, - ) - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE - # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["dep"] + " - " + df["departement"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["COMMUNE"] - return df - - -# Appel des fonctions pour charger les données - -df_other = load_df_other() -df_structures = load_structures() - - -# Création du filtre par niveau géographique : correspondance labels et variables du dataframe -niveaux_admin_dict = { - "Région": "REGION", - "Département": "DEP_CODE_NOM", - "EPCI": "LIBEPCI", - "Commune": "COMMUNE_CODE_NOM", -} - -# 1ère étape : sélection du niveau administratif concerné (région, dép...) 
-# Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment -# Récupérer les index pour conserver la valeur des filtres au changement de pages -# Filtre niveau administratif -niveau_admin = st.session_state.get("niveau_admin", None) -index_admin = st.session_state.get("index_admin", None) -# Filtre collectivité -collectivite = st.session_state.get("collectivite", None) -index_collec = st.session_state.get("index_collec", None) - -# Initialiser la selectbox avec l'index récupéré -select_niveauadmin = st.selectbox( - "Niveau administratif : ", - niveaux_admin_dict.keys(), - index=index_admin, +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) +authenticator.login( + fields={ + "Form name": "Connexion", + "Username": "Identifiant", + "Password": "Mot de passe", + "Login": "Connexion", + }, ) - -if select_niveauadmin is not None: - # Filtrer la liste des collectivités en fonction du niveau admin - liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] - liste_collectivites = liste_collectivites.sort_values().unique() - - # 2ème filtre : sélection de la collectivité concernée - select_collectivite = st.selectbox( - "Collectivité : ", - liste_collectivites, - index=index_collec, - ) -if st.button("Enregistrer la sélection"): - # Enregistrer les valeurs sélectionnées dans le session.state - st.session_state["niveau_admin"] = select_niveauadmin - st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( - select_niveauadmin, +if st.session_state["authentication_status"]: + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + ], ) - st.session_state["collectivite"] = select_collectivite - st.session_state["index_collec"] = list(liste_collectivites).index( - select_collectivite, + st.markdown("""# À propos""") + + # Chargement des données et filtre géographique à l'arrivée sur le 
dashboard + # Table des volumes par matériaux + @st.cache_data + def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + # Table des structures + @st.cache_data + def load_structures() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/4-" + "onglet-structures/Exploration_visuali" + "sation/data/structures_export_cleaned.csv", + index_col=0, + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["dep"] + " - " + df["departement"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["COMMUNE"] + return df + + # Appel des fonctions pour charger les données + + df_other = load_df_other() + df_structures = load_structures() + + # Création du filtre par niveau géographique : correspondance labels et variables + niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", + } + + # 1ère étape : sélection du niveau administratif concerné (région, dép...) 
+ # Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment + # Récupérer les index pour conserver la valeur des filtres au changement de pages + # Filtre niveau administratif + niveau_admin = st.session_state.get("niveau_admin", None) + index_admin = st.session_state.get("index_admin", None) + # Filtre collectivité + collectivite = st.session_state.get("collectivite", None) + index_collec = st.session_state.get("index_collec", None) + + # Initialiser la selectbox avec l'index récupéré + select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=index_admin, ) - # Afficher la collectivité sélectionnée - st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") - - # Filtrer et enregistrer le DataFrame dans un session state pour la suite - colonne_filtre = niveaux_admin_dict[select_niveauadmin] - df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] - st.session_state["df_other_filtre"] = df_other_filtre - - # Filtrer dataframe structures et enregistrer dans le session.state - df_structures_filtre = df_structures[ - df_other[colonne_filtre] == select_collectivite - ] - st.session_state["structures_filtre"] = df_structures_filtre - - # Filtrer et enregistrer le dataframe nb_dechets dans session.State - # Récuperer la liste des relevés - id_releves = df_other_filtre["ID_RELEVE"].unique() - # Filtrer df_nb_dechets sur la liste des relevés - # st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ - - # Afficher le nombre de relevés disponibles - nb_releves = len(st.session_state["df_other_filtre"]) - st.write( - f"{nb_releves} relevés de collecte sont disponibles \ - pour l'analyse sur votre territoire.", + if select_niveauadmin is not None: + # Filtrer la liste des collectivités en fonction du niveau admin + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + 
# 2ème filtre : sélection de la collectivité concernée + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=index_collec, + ) + + button_disabled = not select_niveauadmin or not select_collectivite + if st.button("Enregistrer la sélection", disabled=button_disabled): + # Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, + ) + + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/structures.py", "Structures", "🔭"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", "Hotspots", "🔥"), + ], + ) + + # Filtrer et enregistrer le DataFrame dans un session state pour la suite + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] + st.session_state["df_other_filtre"] = df_other_filtre + + # Filtrer dataframe structures et enregistrer dans le session.state + df_structures_filtre = df_structures[ + df_other[colonne_filtre] == select_collectivite + ] + st.session_state["structures_filtre"] = df_structures_filtre + + # Filtrer et enregistrer le dataframe nb_dechets dans session.State + # Récuperer la liste des relevés + id_releves = df_other_filtre["ID_RELEVE"].unique() + # Filtrer df_nb_dechets sur la liste des relevés + # st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ + + # Afficher le nombre de relevés disponibles + nb_releves = len(st.session_state["df_other_filtre"]) + st.write( + f"{nb_releves} relevés de collecte sont disponibles \ + pour 
l'analyse sur votre territoire.", + ) + + authenticator.logout() +elif st.session_state["authentication_status"] is False: + st.error("Mauvais identifiants ou mot de passe.") +elif st.session_state["authentication_status"] is None: + st.warning("Veuillez entrer votre identifiant et mot de passe") + + show_pages( + [ + Page("home.py", "Home", "🏠 "), + Page("pages/register.py", "S'enregistrer", "🚀"), + ], ) From c792432f62d45778cd6020b32e3218983c364dbd Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 20:07:42 +0200 Subject: [PATCH 100/147] [tg] center folium map on coordinates --- dashboards/app/pages/structures.py | 36 +++++++++++++++++------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/dashboards/app/pages/structures.py b/dashboards/app/pages/structures.py index c666cb2..dfe24fb 100644 --- a/dashboards/app/pages/structures.py +++ b/dashboards/app/pages/structures.py @@ -118,26 +118,28 @@ st.markdown(""" **Cartographie des structures du territoire**""") # Création de la carte centrée autour d'une localisation - # Initialisation du zoom sur la carte - if filtre_niveau == "Commune": - zoom_admin = 12 - elif filtre_niveau == "EPCI": - zoom_admin = 13 - elif filtre_niveau == "Département": - zoom_admin = 10 - else: - zoom_admin = 8 + # # Initialisation du zoom sur la carte + # if filtre_niveau == "Commune": + # zoom_admin = 12 + # elif filtre_niveau == "EPCI": + # zoom_admin = 13 + # elif filtre_niveau == "Département": + # zoom_admin = 10 + # else: + # zoom_admin = 8 # Calcul des limites à partir de vos données - min_lat = df_structures["latitude"].min() - max_lat = df_structures["latitude"].max() - min_lon = df_structures["longitude"].min() - max_lon = df_structures["longitude"].max() + # min_lat = df_structures["latitude"].min() + # max_lat = df_structures["latitude"].max() + # min_lon = df_structures["longitude"].min() + # max_lon = df_structures["longitude"].max() + + sw = 
df_structures[["latitude", "longitude"]].min().values.tolist() + ne = df_structures[["latitude", "longitude"]].max().values.tolist() map_data = folium.Map( - location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], - zoom_start=zoom_admin, - # zoom_start=8, + # zoom_start=zoom_admin, + zoom_start=8, tiles="OpenStreetMap", ) @@ -158,6 +160,8 @@ ), ).add_to(map_data) + map_data.fit_bounds([sw, ne]) + # Affichage de la carte Folium dans Streamlit st_folium = st.components.v1.html st_folium( From e35b72e7940e2d5563c4ad73ac66362b130f1d1a Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 24 Apr 2024 20:09:40 +0200 Subject: [PATCH 101/147] [tg] - fixed filter bug --- dashboards/app/home.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 98c9d11..5bace46 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -161,7 +161,7 @@ def load_structures() -> pd.DataFrame: # Filtrer dataframe structures et enregistrer dans le session.state df_structures_filtre = df_structures[ - df_other[colonne_filtre] == select_collectivite + df_structures[colonne_filtre] == select_collectivite ] st.session_state["structures_filtre"] = df_structures_filtre From 910f8088c04ba6de544dd1f6cab96d2f8f6f5289 Mon Sep 17 00:00:00 2001 From: linh dinh Date: Wed, 24 Apr 2024 20:49:46 +0200 Subject: [PATCH 102/147] =?UTF-8?q?Derni=C3=A8re=20version=20de=20Home?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 00c5d07..c7f84e6 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -49,10 +49,6 @@ def load_css(file_name: str) -> None: show_pages( [ Page("home.py", "Accueil", "🏠"), - Page("pages/actions.py", "Actions", "👊"), - Page("pages/data.py", 
"Data", "🔍"), - Page("pages/hotspots.py", "Hotspots", "🔥"), - Page("pages/structures.py", "Structures", "🔭"), ], ) @@ -134,8 +130,8 @@ def load_df_nb_dechet() -> pd.DataFrame: liste_collectivites, index=index_collec, ) - - if st.button("Enregistrer la sélection"): + button_disabled = not select_niveauadmin or not select_collectivite + if st.button("Enregistrer la sélection", disabled=button_disabled): # Enregistrer les valeurs sélectionnées dans le session.state st.session_state["niveau_admin"] = select_niveauadmin st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( @@ -149,6 +145,15 @@ def load_df_nb_dechet() -> pd.DataFrame: # Afficher la collectivité sélectionnée st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/structures.py", "Structures", "🔭"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", "Hotspots", "🔥"), + ], + ) # Filtrer et enregistrer le DataFrame dans un session state pour la suite colonne_filtre = niveaux_admin_dict[select_niveauadmin] From f71ef97d6c671388350c2e0f9e7742b91a05fe02 Mon Sep 17 00:00:00 2001 From: Floriane Duccini Date: Wed, 24 Apr 2024 23:00:27 +0200 Subject: [PATCH 103/147] amelioration of dataframe resentation + selection of collectivity --- dashboards/app/home.py | 3 ++ dashboards/app/pages/structures.py | 73 ++++++++++++++++++++++++++++-- 2 files changed, 72 insertions(+), 4 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index b5beb2a..c5bbebb 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -72,6 +72,7 @@ def load_structures() -> pd.DataFrame: "Niveau administratif : ", niveaux_admin_dict.keys(), index=index_admin, + placeholder="Choisir une option", ) if select_niveauadmin is not None: @@ -84,6 +85,7 @@ def load_structures() -> pd.DataFrame: "Collectivité : ", liste_collectivites, index=index_collec, + 
placeholder="Choisir une collectivité", ) @@ -112,6 +114,7 @@ def load_structures() -> pd.DataFrame: df_other[colonne_filtre] == select_collectivite ] st.session_state["structures_filtre"] = df_structures_filtre + st.session_state["structures"] = df_structures_filtre # Filtrer et enregistrer le dataframe nb_dechets dans session.State # Récuperer la liste des relevés diff --git a/dashboards/app/pages/structures.py b/dashboards/app/pages/structures.py index 20614ef..b74ac4a 100644 --- a/dashboards/app/pages/structures.py +++ b/dashboards/app/pages/structures.py @@ -46,12 +46,37 @@ else: df_releves = st.session_state["df_other_filtre"] +if "structures" not in st.session_state: + st.write( + """ + ### :warning: Merci de sélectionner une collectivité\ + dans l'onglet Home pour afficher les données. :warning: + """ + ) + st.stop() +else: + df_structures_full = st.session_state["structures"] if filtre_niveau == "" and filtre_collectivite == "": st.write("Aucune sélection de territoire n'a été effectuée") else: st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") +dict_agg_df_releves = {"DATE": "max", "ID_RELEVE": "count"} +df_releve_structure = ( + df_releves.groupby(["ID_STRUCTURE"]).agg(dict_agg_df_releves).reset_index() +) +df_releve_structure.rename( + columns={"DATE": "Date dernière collecte", "ID_RELEVE": "Nombre de relevés"}, + inplace=True, +) + +df_structures = df_structures.merge( + df_releve_structure, how="left", left_on="ID_STRUCT", right_on="ID_STRUCTURE" +) +df_structures["Nombre de relevés"].fillna(0, inplace=True) +df_structures["Date dernière collecte"].fillna(" ", inplace=True) + # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2 = st.columns(2) @@ -117,6 +142,12 @@ st.markdown(""" **Cartographie des structures du territoire**""") +@st.cache_data(show_spinner=False) +def split_frame(input_df, rows): + df = [input_df.loc[i : i + rows - 1, :] for i in range(0, len(input_df), rows)] + return df + + # 
Affichage du dataframe with st.container(): st.markdown(""" **Structures du territoire**""") @@ -124,12 +155,46 @@ ( """SELECT NOM_structure as Nom, - TYPE, - ACTION_RAM AS 'Nombre de collectes', - A1S_NB_SPO as 'Nombre de spots adoptés' + TYPE as Type, + "Nombre de relevés", + A1S_NB_SPO as 'Nombre de spots adoptés', + "Date dernière collecte" FROM df_structures ORDER BY Nom DESC;""" ) ).to_df() + top_menu = st.columns(2) + with top_menu[0]: + sort_field = st.selectbox( + "Trier par", + options=[ + "Nombre de relevés", + "Type", + "Nombre de spots adoptés", + "Date dernière collecte", + ], + ) + with top_menu[1]: + sort_direction = st.radio("Direction", options=["⬇️", "⬆️"], horizontal=True) + df_struct_simplifie = df_struct_simplifie.sort_values( + by=sort_field, ascending=sort_direction == "⬆️", ignore_index=True + ) + pagination = st.container() + + bottom_menu = st.columns((4, 1, 1)) + with bottom_menu[2]: + batch_size = st.selectbox("Taille Page", options=[10, 20]) + with bottom_menu[1]: + total_pages = ( + int(len(df_struct_simplifie) / batch_size) + if int(len(df_struct_simplifie) / batch_size) > 0 + else 1 + ) + current_page = st.number_input( + "Page", min_value=1, max_value=total_pages, step=1 + ) + with bottom_menu[0]: + st.markdown(f"Page **{current_page}** sur **{total_pages}** ") - st.dataframe(df_struct_simplifie, hide_index=True) + pages = split_frame(df_struct_simplifie, batch_size) + pagination.dataframe(data=pages[current_page - 1], use_container_width=True) From 191d46fe8a58acde8b3156afdd21a28f7ea953c5 Mon Sep 17 00:00:00 2001 From: Floriane Duccini Date: Wed, 24 Apr 2024 23:48:05 +0200 Subject: [PATCH 104/147] adding requirements --- dashboards/app/requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 134d136..91fa3eb 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -4,5 +4,7 @@ folium==0.16.0 
duckdb==0.10.0 streamlit==1.32.2 streamlit-folium==0.19.1 +streamlit-authenticator==0.3.2 plotly==5.19.0 -openpyxl==3.1.2 \ No newline at end of file +openpyxl==3.1.2 +st-pages==0.4.5 \ No newline at end of file From 93259da9dc9b24d04f5beab89e00fb4990cfe774 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Sat, 20 Apr 2024 13:27:05 -0400 Subject: [PATCH 105/147] =?UTF-8?q?[kb]=20=F0=9F=99=88=20Update=20gitignor?= =?UTF-8?q?e?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 05ec177..a21d144 100644 --- a/.gitignore +++ b/.gitignore @@ -164,4 +164,10 @@ cython_debug/ # Precommit hooks: ruff cache .ruff_cache -etl/zds/.file_versions/* \ No newline at end of file +etl/zds/.file_versions/* + +# Dossier sauvegarde Thibaut +TG_sauv + +# Streamlit: credentials +dashboards/app/.credentials.yml From 3165baff19fce39a9f9382972de27340a7a314d4 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Wed, 24 Apr 2024 17:58:21 -0400 Subject: [PATCH 106/147] =?UTF-8?q?[kb]=20=F0=9F=9A=91=20Fix=20authenticat?= =?UTF-8?q?ion?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/register.py | 47 ++++++++++++++++++++++++++++++++ dashboards/app/style.css | 13 +++++++++ 2 files changed, 60 insertions(+) create mode 100644 dashboards/app/pages/register.py create mode 100644 dashboards/app/style.css diff --git a/dashboards/app/pages/register.py b/dashboards/app/pages/register.py new file mode 100644 index 0000000..be54cb4 --- /dev/null +++ b/dashboards/app/pages/register.py @@ -0,0 +1,47 @@ +from pathlib import Path +import yaml +from yaml.loader import SafeLoader +import streamlit as st +import streamlit_authenticator as stauth + +st.markdown( + """ +# Bienvenue 👋 +#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
+""", +) + +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) + +try: + ( + email_of_registered_user, + username_of_registered_user, + name_of_registered_user, + ) = authenticator.register_user( + pre_authorization=False, + fields={ + "Form name": "S'enregistrer", + "Email": "Email", + "Username": "Identifiant", + "Password": "Mot de passe", + "Repeat password": "Répeter le mot de passe", + "Register": "S'enregistrer", + }, + ) + if email_of_registered_user: + with open(".credentials.yml", "w") as file: + yaml.dump(config, file, default_flow_style=False) + st.success("Utilisateur enregistré") +except Exception as e: + st.error(e) diff --git a/dashboards/app/style.css b/dashboards/app/style.css new file mode 100644 index 0000000..3fb0486 --- /dev/null +++ b/dashboards/app/style.css @@ -0,0 +1,13 @@ +@import url('https://fonts.googleapis.com/css2?family=Montserrat:wght@500;700&display=swap'); + +/* GLOBAL FONT CHANGE */ +html, body, [class*="css"] { + font-family: 'Montserrat', sans-serif; +} + + +/* Sidebar color change */ +[data-testid="stSidebar"] { + background-color: #003463 !important; + color: #FFFFFF !important; +} From 544c91d8393799bf33009653bf910eeaa143213a Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Sat, 20 Apr 2024 13:10:25 -0400 Subject: [PATCH 107/147] =?UTF-8?q?[kb]=20=F0=9F=9A=9A=20Move=20creds=20to?= =?UTF-8?q?=20app?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/.credentials-dev.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 dashboards/app/.credentials-dev.yml diff --git a/dashboards/app/.credentials-dev.yml b/dashboards/app/.credentials-dev.yml new file mode 100644 index 0000000..716cedd --- /dev/null +++ 
b/dashboards/app/.credentials-dev.yml @@ -0,0 +1,14 @@ +cookie: + expiry_days: 30 + key: some_signature_key + name: some_cookie_name +credentials: + usernames: + test: + email: test@test.com + logged_in: false + name: test + password: $2b$12$fR4sp7tIG.dbeusbr695MOw/xvN1sf.21rML7t7j9pCdIVREIocUO +pre-authorized: + emails: + - test@test.com From 23febca74eb91e9394dc88ccfc347082b62236e6 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Wed, 24 Apr 2024 18:05:22 -0400 Subject: [PATCH 108/147] =?UTF-8?q?[kb]=20=F0=9F=9A=91=20Add=20authenticat?= =?UTF-8?q?ion?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/requirements.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 134d136..54ac0f9 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -1,8 +1,13 @@ pandas==2.0.3 +numpy==1.26 geopandas==0.14.3 folium==0.16.0 duckdb==0.10.0 streamlit==1.32.2 +openpyxl==3.1.2 streamlit-folium==0.19.1 plotly==5.19.0 -openpyxl==3.1.2 \ No newline at end of file +streamlit-dynamic-filters==0.1.6 +streamlit-authenticator==0.3.2 +st-pages==0.4.5 +babel==2.11.0 From a6e200b0c22f04fa581cc3de1897c71aceed3800 Mon Sep 17 00:00:00 2001 From: Floriane Duccini Date: Thu, 25 Apr 2024 00:47:32 +0200 Subject: [PATCH 109/147] taking into account all territory's structures --- dashboards/app/home.py | 2 + dashboards/app/pages/register.py | 47 +++++++++++++++++++++ dashboards/app/pages/structures.py | 66 ++++++++++++++++++++++-------- 3 files changed, 99 insertions(+), 16 deletions(-) create mode 100644 dashboards/app/pages/register.py diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 3ab284d..406825f 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -86,6 +86,7 @@ def load_structures() -> pd.DataFrame: # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) 
df["DEP_CODE_NOM"] = df["dep"] + " - " + df["departement"] df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["COMMUNE"] + df.columns = [c.upper() for c in df.columns] return df # Appel des fonctions pour charger les données @@ -167,6 +168,7 @@ def load_structures() -> pd.DataFrame: df_structures[colonne_filtre] == select_collectivite ] st.session_state["structures_filtre"] = df_structures_filtre + st.session_state["structures"] = df_structures # Filtrer et enregistrer le dataframe nb_dechets dans session.State # Récuperer la liste des relevés diff --git a/dashboards/app/pages/register.py b/dashboards/app/pages/register.py new file mode 100644 index 0000000..be54cb4 --- /dev/null +++ b/dashboards/app/pages/register.py @@ -0,0 +1,47 @@ +from pathlib import Path +import yaml +from yaml.loader import SafeLoader +import streamlit as st +import streamlit_authenticator as stauth + +st.markdown( + """ +# Bienvenue 👋 +#### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
+""", +) + +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) + +try: + ( + email_of_registered_user, + username_of_registered_user, + name_of_registered_user, + ) = authenticator.register_user( + pre_authorization=False, + fields={ + "Form name": "S'enregistrer", + "Email": "Email", + "Username": "Identifiant", + "Password": "Mot de passe", + "Repeat password": "Répeter le mot de passe", + "Register": "S'enregistrer", + }, + ) + if email_of_registered_user: + with open(".credentials.yml", "w") as file: + yaml.dump(config, file, default_flow_style=False) + st.success("Utilisateur enregistré") +except Exception as e: + st.error(e) diff --git a/dashboards/app/pages/structures.py b/dashboards/app/pages/structures.py index 1be34b6..a974526 100644 --- a/dashboards/app/pages/structures.py +++ b/dashboards/app/pages/structures.py @@ -63,6 +63,7 @@ else: st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") +# On constitue la table qu'on va afficher en bas en combinant la table des structures et des relevés dict_agg_df_releves = {"DATE": "max", "ID_RELEVE": "count"} df_releve_structure = ( df_releves.groupby(["ID_STRUCTURE"]).agg(dict_agg_df_releves).reset_index() @@ -75,25 +76,52 @@ df_structures = df_structures.merge( df_releve_structure, how="left", left_on="ID_STRUCT", right_on="ID_STRUCTURE" ) -df_structures["Nombre de relevés"].fillna(0, inplace=True) -df_structures["Date dernière collecte"].fillna(" ", inplace=True) +structures_releves = [ + c + for c in list(df_releve_structure["ID_STRUCTURE"].unique()) + if c not in list(df_structures["ID_STRUCT"].unique()) +] + +df_structures_releves = df_structures_full.merge( + df_releve_structure[df_releve_structure["ID_STRUCTURE"].isin(structures_releves)], + 
how="right", + left_on="ID_STRUCT", + right_on="ID_STRUCTURE", +) +df_structures_territoire = pd.concat([df_structures, df_structures_releves]) +df_structures_territoire["Nombre de relevés"].fillna(0, inplace=True) +df_structures_territoire["Date dernière collecte"].fillna(" ", inplace=True) + # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page -l1_col1, l1_col2 = st.columns(2) +l1_col1, l1_col2, l1_col3 = st.columns(3) # Pour avoir une bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) # 1ère métrique : nombre d'acteurs cell1 = l1_col1.container(border=True) -nb_acteurs = len(df_structures) +nb_acteurs = len(df_structures_territoire) # Trick pour séparer les milliers -cell1.metric("Acteurs présents sur le territoire", nb_acteurs) +cell1.metric("Acteurs* du territoire", nb_acteurs) -# 2ème métrique : nb de spots adoptés +# 2ème métrique : nombre d'acteurs actifs cell2 = l1_col2.container(border=True) -nb_spots_adoptes = df_structures["A1S_NB_SPO"].sum() -cell2.metric("Spots adoptés", nb_spots_adoptes) +nb_acteurs_actifs = len( + df_structures_territoire[df_structures_territoire["Nombre de relevés"] > 0] +) +# Trick pour séparer les milliers +cell2.metric("Acteurs ayant été actifs sur le territoire", nb_acteurs_actifs) + +# 3ème métrique : nb de spots adoptés +cell3 = l1_col3.container(border=True) +nb_spots_adoptes = df_structures_territoire["A1S_NB_SPO"].sum() +cell3.metric("Spots adoptés par les acteurs du territoire", int(nb_spots_adoptes)) + +st.markdown( + """*Acteurs * : acteurs dont l'adresse se trouve sur le territoire ou ayant réalisé un ramassage sur le territoire.* +""" +) # Ligne 2 : 2 graphiques en ligne : carte et pie chart type de structures @@ -107,7 +135,7 @@ df_aggType = duckdb.query( ( "SELECT TYPE, count(TYPE) AS nb_structures " - "FROM df_structures " + "FROM df_structures_territoire " "GROUP BY TYPE " "ORDER BY nb_structures DESC;" ) @@ -142,6 +170,12 @@ with st.container(): 
st.markdown(""" **Cartographie des structures du territoire**""") + st.markdown( + """ + *Ne sont représentés ici que les acteurs dont l'adresse se trouve sur le territoire.* + """ + ) + # Création de la carte centrée autour d'une localisation # # Initialisation du zoom sur la carte # if filtre_niveau == "Commune": @@ -159,8 +193,8 @@ # min_lon = df_structures["longitude"].min() # max_lon = df_structures["longitude"].max() - sw = df_structures[["latitude", "longitude"]].min().values.tolist() - ne = df_structures[["latitude", "longitude"]].max().values.tolist() + sw = df_structures[["LATITUDE", "LONGITUDE"]].min().values.tolist() + ne = df_structures[["LATITUDE", "LONGITUDE"]].max().values.tolist() map_data = folium.Map( # zoom_start=zoom_admin, @@ -177,11 +211,11 @@ # Application d'une limite minimale pour le rayon si nécessaire folium.Marker( - location=(row["latitude"], row["longitude"]), + location=(row["LATITUDE"], row["LONGITUDE"]), color="#3186cc", icon=folium.Icon(color="blue"), popup=folium.Popup( - f"{row['NOM_structure']}\n ({row['COMMUNE']})", max_width=100 + f"{row['NOM_STRUCTURE']}\n ({row['COMMUNE']})", max_width=100 ), ).add_to(map_data) @@ -207,12 +241,12 @@ def split_frame(input_df, rows): df_struct_simplifie = duckdb.query( ( """SELECT - NOM_structure as Nom, + NOM_STRUCTURE as Nom, TYPE as Type, "Nombre de relevés", A1S_NB_SPO as 'Nombre de spots adoptés', "Date dernière collecte" - FROM df_structures + FROM df_structures_territoire ORDER BY Nom DESC;""" ) ).to_df() @@ -228,7 +262,7 @@ def split_frame(input_df, rows): ], ) with top_menu[1]: - sort_direction = st.radio("Direction", options=["⬇️", "⬆️"], horizontal=True) + sort_direction = st.radio("Ordre", options=["⬇️", "⬆️"], horizontal=True) df_struct_simplifie = df_struct_simplifie.sort_values( by=sort_field, ascending=sort_direction == "⬆️", ignore_index=True ) From 8bec20192ea23dbbe9cdac59502e7b0e8d982f84 Mon Sep 17 00:00:00 2001 From: Kyllian Beguin 
<50613619+KyllianBeguin@users.noreply.github.com> Date: Thu, 25 Apr 2024 08:21:52 +0200 Subject: [PATCH 110/147] =?UTF-8?q?[kb]=20=F0=9F=94=A5=20Delete=20.credent?= =?UTF-8?q?ials.yml?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/.credentials.yml | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 dashboards/app/.credentials.yml diff --git a/dashboards/app/.credentials.yml b/dashboards/app/.credentials.yml deleted file mode 100644 index d66d7ae..0000000 --- a/dashboards/app/.credentials.yml +++ /dev/null @@ -1,14 +0,0 @@ -cookie: - expiry_days: 30 - key: some_signature_key - name: some_cookie_name -credentials: - usernames: - test: - email: test@test.com - logged_in: false - name: test - password: $2b$12$fR4sp7tIG.dbeusbr695MOw/xvN1sf.21rML7t7j9pCdIVREIocUO -pre-authorized: - emails: - - test@test.com \ No newline at end of file From 0246942fd57ef31f3ee1bb18998b9f33fe0a5bcd Mon Sep 17 00:00:00 2001 From: linh dinh Date: Thu, 25 Apr 2024 11:01:28 +0200 Subject: [PATCH 111/147] Debug probleme zoom --- dashboards/app/pages/hotspots.py | 135 ++++++++++++++++--------------- 1 file changed, 70 insertions(+), 65 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index f694436..6c4ae3f 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -438,72 +438,73 @@ def update_lieu_options(selected_milieu): return ["Sélectionnez un lieu..."] + list(filtered_data['TYPE_LIEU2'].dropna().unique()) return ["Sélectionnez un lieu..."] -# Function to plot a density map -def plot_density_map( - data_zds: pd.DataFrame, - filtered_data: pd.DataFrame -) -> folium.Map: +@st.cache_data +def process_data(data_zds): + # Filtering data to ensure surface area is not zero + data_zds = data_zds[data_zds['SURFACE'] > 0] + # Calculating density and filtering out anomalous values + data_zds['DENSITE'] = data_zds['VOLUME_TOTAL'] / 
data_zds['SURFACE'] + data_zds = data_zds[data_zds['DENSITE'] < 20] + # Rounding values for better display + data_zds['DENSITE'] = data_zds['DENSITE'].round(4) + data_zds['SURFACE_ROND'] = data_zds['SURFACE'].round(2) + return data_zds + +#Zoom from admin level +if NIVEAU_ADMIN == "Commune": + zoom_admin = 12 +elif NIVEAU_ADMIN == "EPCI": + zoom_admin = 13 +elif NIVEAU_ADMIN == "Département": + zoom_admin = 10 +else: + zoom_admin = 8 +# Function to plot a density map +def plot_density_map(data_zds: pd.DataFrame, filtered_data: pd.DataFrame) -> folium.Map: # Check if the primary dataset is empty if data_zds.empty: st.write("Aucune donnée disponible pour la région sélectionnée.") - # Initialize a basic map without any data-specific layers - m = folium.Map(location=[46.6358, 2.5614], zoom_start=5) + return folium.Map(location=[46.6358, 2.5614], zoom_start=5) else: - # Use filtered data if available; otherwise, use the full dataset - if filtered_data.empty: - map_data = data_zds - else: - map_data = filtered_data + # Use processed data + processed_data = process_data(filtered_data if not filtered_data.empty else data_zds) + + m = folium.Map( + location=[ + processed_data['LIEU_COORD_GPS_Y'].mean(), + processed_data['LIEU_COORD_GPS_X'].mean() + ], + zoom_start=zoom_admin + ) - # Ensure the surface area is not zero to avoid division by zero - map_data = map_data[map_data['SURFACE'] > 0] + # Loop over each row in the DataFrame to place markers + for index, row in processed_data.iterrows(): + popup_html = f""" +
+

Densité: {row['DENSITE']} L/m²

+

Volume total : {row['VOLUME_TOTAL']} litres

+

Surface total : {row['SURFACE_ROND']} m²

+

Type de milieu : {row['TYPE_MILIEU']}

+

Type de lieu : {row['TYPE_LIEU']}

+
+ """ + lgd_txt = '{txt}' + color = couleur_milieu(row['TYPE_MILIEU']) + folium.CircleMarker( + fg = folium.FeatureGroup(name= lgd_txt.format( txt= ['TYPE_MILIEU'], col= color)), + location=[row['LIEU_COORD_GPS_Y'], row['LIEU_COORD_GPS_X']], + radius=np.log(row['DENSITE'] + 1)*15, + popup=folium.Popup(popup_html, max_width=300), + color=color, + fill=True, - # Calculate density - map_data["DENSITE"] = map_data["VOLUME_TOTAL"] / map_data["SURFACE"] - map_data = map_data[map_data["DENSITE"] < 20] # Remove rows with anomalously high density values - - # Round density values for display - map_data["DENSITE"] = map_data["DENSITE"].round(4) - # Round surface values for display - map_data["SURFACE_ROND"] = map_data["SURFACE"].round(2) - - # Initialize a map centered at the mean coordinates of locations - if not map_data[['LIEU_COORD_GPS_Y', 'LIEU_COORD_GPS_X']].dropna().empty: - m = folium.Map( - location=[ - map_data["LIEU_COORD_GPS_Y"].mean(), - map_data["LIEU_COORD_GPS_X"].mean(), - ], - zoom_start=12 - ) + ).add_to(m) - # Loop over each row in the DataFrame to place markers - for _, row in map_data.iterrows(): - if pd.notna(row['LIEU_COORD_GPS_Y']) and pd.notna(row['LIEU_COORD_GPS_X']): - popup_html = f""" -
-

Densité: {row['DENSITE']} L/m²

-

Volume total : {row['VOLUME_TOTAL']} litres

-

Surface total : {row['SURFACE_ROND']} m²

-

Type de milieu : {row['TYPE_MILIEU']}

-

Type de lieu : {row['TYPE_LIEU']}

-
- """ - color = couleur_milieu(row["TYPE_MILIEU"]) - folium.CircleMarker( - location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], - radius=np.log(row["DENSITE"] + 1) * 15, - popup=folium.Popup(popup_html, max_width=300), - color=color, - fill=True, - ).add_to(m) - - # Display the map in Streamlit - st_folium(m, width='100%', height=600) # Adjust width and height as needed - else: - st.write("Aucune donnée de localisation valide à afficher sur la carte.") + folium_static(m) + + return m # Function for 'milieu' density table def density_table_milieu( @@ -772,14 +773,18 @@ def create_contributors_table(data_zds: pd.DataFrame, multi_filter_dict: dict) - lieu_options = update_lieu_options(selected_milieu) selected_lieu = st.selectbox("Sélectionnez un lieu:", lieu_options) - st.markdown("### Carte des Densités") - # Automatically update the map based on the current selection - if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": - filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] - plot_density_map(data_zds_correct, filtered_data) - else: - # Optionally show the map with all data or display a message - plot_density_map(data_zds_correct, data_zds_correct) # Show all data by default + + # Place the map centrally by using a wider column for the map and narrower ones on the sides + col1, map_col, col3 = st.columns([4, 10, 1]) # Adjust column ratios as needed + + with map_col: + st.markdown("### Carte des Densités") + if selected_milieu != "Sélectionnez un milieu..." 
and selected_lieu != "Sélectionnez un lieu...": + filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + plot_density_map(data_zds_correct, filtered_data) + else: + plot_density_map(data_zds_correct, data_zds_correct) # Show all data by default + col1, col2, col3 = st.columns([3, 3, 2]) From fb186a5f9ec6d221be17b59d9dccdbfb170824f9 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 22 May 2024 12:22:54 +0200 Subject: [PATCH 112/147] =?UTF-8?q?[tg]=20-=20valeurs=20par=20d=C3=A9faut?= =?UTF-8?q?=20des=20filtres=20et=20optimisation=20du=20layout=20des=20filt?= =?UTF-8?q?res?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 251 ++++++++++++++++++----------------- pyproject.toml | 3 - 2 files changed, 130 insertions(+), 124 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 21eb522..600081f 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -307,68 +307,84 @@ def frenchify(x: int) -> str: st.plotly_chart(fig3, use_container_width=True) # Ligne 3 : Graphe par milieu , lieu et année - st.write("**Détail par milieu, lieu ou année**") + st.write("**Filtrer les données par année, type de milieu ou type de lieu**") # Étape 1: Création des filtres df_other_metrics = df_other_metrics_raw.copy() df_other_metrics = df_other_metrics.fillna(0) - selected_annee = st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + annee_liste, - ) - if selected_annee != "Aucune sélection": - filtered_data_milieu = df_other[df_other["ANNEE"] == selected_annee].copy() - filtered_metrics_milieu = df_other_metrics[ - df_other_metrics["ANNEE"] == selected_annee - ].copy() - else: - filtered_data_milieu = df_other.copy() - filtered_metrics_milieu = df_other_metrics.copy() + with st.expander("Filtrer par 
année, type milieu ou type de lieu"): - selected_type_milieu = st.selectbox( - "Choisir un type de milieu:", - options=["Aucune sélection"] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), - ) + # Filtre par Année + # Valeur par défaut sous forme de liste pour concaténation avec données + valeur_par_defaut_annee = "Toute la période" - if selected_type_milieu != "Aucune sélection": - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu - ] - filtered_metrics_milieu = filtered_metrics_milieu[ - filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu - ] - else: - filtered_data_lieu = filtered_data_milieu.copy() - filtered_metrics_milieu = df_other_metrics.copy() + selected_annee = st.selectbox( + "Choisir une année:", + options=[valeur_par_defaut_annee] + annee_liste, + ) + if selected_annee != valeur_par_defaut_annee: + filtered_data_milieu = df_other[ + df_other["ANNEE"] == selected_annee + ].copy() + filtered_metrics_milieu = df_other_metrics[ + df_other_metrics["ANNEE"] == selected_annee + ].copy() + else: + filtered_data_milieu = df_other.copy() + filtered_metrics_milieu = df_other_metrics.copy() - selected_type_lieu = st.selectbox( - "Choisir un type de lieu:", - options=["Aucune sélection"] - + list(filtered_data_lieu["TYPE_LIEU"].unique()), - ) + # Filtre par milieu + + valeur_par_defaut_milieu = "Tous les milieux" + + selected_type_milieu = st.selectbox( + "Choisir un type de milieu:", + options=[valeur_par_defaut_milieu] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + ) + + if selected_type_milieu != valeur_par_defaut_milieu: + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu + ] + filtered_metrics_milieu = filtered_metrics_milieu[ + filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu + ] + else: + filtered_data_lieu = filtered_data_milieu.copy() + filtered_metrics_milieu = df_other_metrics.copy() + + # Filtre par 
type de lieu + + valeur_par_defaut_lieu = "Tous les lieux" + + selected_type_lieu = st.selectbox( + "Choisir un type de lieu:", + options=[valeur_par_defaut_lieu] + + list(filtered_data_lieu["TYPE_LIEU"].unique()), + ) if ( - selected_annee == "Aucune sélection" - and selected_type_milieu == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" + selected_annee == valeur_par_defaut_annee + and selected_type_milieu == valeur_par_defaut_milieu + and selected_type_lieu == valeur_par_defaut_lieu ): df_filtered = df_other.copy() df_filtered_metrics = df_other_metrics_raw.copy() elif ( - selected_type_milieu == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" + selected_type_milieu == valeur_par_defaut_milieu + and selected_type_lieu == valeur_par_defaut_lieu ): df_filtered = df_other[df_other["ANNEE"] == selected_annee].copy() df_filtered_metrics = df_other_metrics_raw[ df_other_metrics["ANNEE"] == selected_annee ].copy() elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - and selected_type_milieu != "Aucune sélection" + selected_annee == valeur_par_defaut_annee + and selected_type_lieu == valeur_par_defaut_lieu + and selected_type_milieu != valeur_par_defaut_milieu ): df_filtered = df_other[ df_other["TYPE_MILIEU"] == selected_type_milieu @@ -378,9 +394,9 @@ def frenchify(x: int) -> str: ].copy() elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu == "Aucune sélection" + selected_annee == valeur_par_defaut_annee + and selected_type_lieu != valeur_par_defaut_lieu + and selected_type_milieu == valeur_par_defaut_milieu ): df_filtered = df_other[df_other["TYPE_LIEU"] == selected_type_lieu].copy() df_filtered_metrics = df_other_metrics_raw[ @@ -388,9 +404,9 @@ def frenchify(x: int) -> str: ].copy() elif ( - selected_annee == "Aucune sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu != "Aucune 
sélection" + selected_annee == valeur_par_defaut_annee + and selected_type_lieu != valeur_par_defaut_lieu + and selected_type_milieu != valeur_par_defaut_milieu ): df_filtered = df_other[ (df_other["TYPE_LIEU"] == selected_type_lieu) @@ -401,9 +417,9 @@ def frenchify(x: int) -> str: & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) ] elif ( - selected_annee != "Aucune sélection" - and selected_type_lieu != "Aucune sélection" - and selected_type_milieu == "Aucune sélection" + selected_annee != valeur_par_defaut_annee + and selected_type_lieu != valeur_par_defaut_lieu + and selected_type_milieu == valeur_par_defaut_milieu ): df_filtered = df_other[ (df_other["ANNEE"] == selected_annee) @@ -414,9 +430,9 @@ def frenchify(x: int) -> str: & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) ] elif ( - selected_annee != "Aucune sélection" - and selected_type_lieu == "Aucune sélection" - and selected_type_milieu != "Aucune sélection" + selected_annee != valeur_par_defaut_annee + and selected_type_lieu == valeur_par_defaut_lieu + and selected_type_milieu != valeur_par_defaut_milieu ): df_filtered = df_other[ (df_other["ANNEE"] == selected_annee) @@ -458,17 +474,10 @@ def frenchify(x: int) -> str: cell8.metric("Nombre de collectes", frenchify(nombre_collectes_filtered)) # Message d'avertissement nb de collectes en dessous de 5 - if len(df_filtered) == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(len(df_filtered)) - + " collecte considérées dans les données présentées." - ) - elif len(df_filtered) <= 5: + if len(df_filtered) <= 5: st.warning( - "⚠️ Il n'y a que " + "⚠️ Faible nombre de collectes disponibles dans la base de données : " + str(len(df_filtered)) - + " collectes considérées dans les données présentées." 
) # Étape 3: Preparation dataframe pour graphe @@ -570,17 +579,11 @@ def frenchify(x: int) -> str: cell3.metric("Nombre de collectes comptabilisées", frenchify(nb_collectes_int)) # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(nb_collectes_int) - + " collecte considérées dans les données présentées." - ) - elif nb_collectes_int <= 5: + if nb_collectes_int <= 5: st.warning( - "⚠️ Il n'y a que " + "⚠️ Le nombre de collectes " + str(nb_collectes_int) - + " collectes considérées dans les données présentées." + + " est trop faible pour l'analyse." ) # Ligne 2 : graphique top déchets @@ -729,95 +732,101 @@ def frenchify(x: int) -> str: df_filtre_copy = df_other.copy() # Étape 1: Création des filtres - selected_annee_onglet_3 = st.selectbox( - "Choisir une année:", - options=["Aucune sélection"] + annee_liste, - key="année_select", - ) - if selected_annee_onglet_3 != "Aucune sélection": - filtered_data_milieu = df_other[ - df_other["ANNEE"] == selected_annee_onglet_3 - ] - else: - filtered_data_milieu = df_other.copy() - selected_type_milieu_onglet_3 = st.selectbox( - "Choisir un type de milieu:", - options=["Aucune sélection"] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), - key="type_milieu_select", - ) + with st.expander("Filtrer par année, type milieu ou type de lieu"): - if selected_type_milieu_onglet_3 != "Aucune sélection": - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - ] - else: - filtered_data_lieu = filtered_data_milieu + # Filtre par année + selected_annee_onglet_3 = st.selectbox( + "Choisir une année:", + options=[valeur_par_defaut_annee] + annee_liste, + key="année_select", + ) + if selected_annee_onglet_3 != valeur_par_defaut_annee: + filtered_data_milieu = df_other[ + df_other["ANNEE"] == selected_annee_onglet_3 + ] + else: + filtered_data_milieu = df_other.copy() + + # Filtre par type de milieu + 
selected_type_milieu_onglet_3 = st.selectbox( + "Choisir un type de milieu:", + options=[valeur_par_defaut_milieu] + + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + key="type_milieu_select", + ) - selected_type_lieu_onglet_3 = st.selectbox( - "Choisir un type de lieu:", - options=["Aucune sélection"] - + list(filtered_data_lieu["TYPE_LIEU"].unique()), - key="type_lieu_select", - ) + if selected_type_milieu_onglet_3 != valeur_par_defaut_milieu: + filtered_data_lieu = filtered_data_milieu[ + filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + ] + else: + filtered_data_lieu = filtered_data_milieu + + # Filtre par lieu + selected_type_lieu_onglet_3 = st.selectbox( + "Choisir un type de lieu:", + options=[valeur_par_defaut_lieu] + + list(filtered_data_lieu["TYPE_LIEU"].unique()), + key="type_lieu_select", + ) if ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" + selected_annee_onglet_3 == valeur_par_defaut_annee + and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu + and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu ): df_filtered = df_other.copy() elif ( - selected_type_milieu_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" + selected_type_milieu_onglet_3 == valeur_par_defaut_milieu + and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu ): df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() elif ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" + selected_annee_onglet_3 == valeur_par_defaut_annee + and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu + and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu ): df_filtered = df_other[ df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 ].copy() elif ( - 
selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" + selected_annee_onglet_3 == valeur_par_defaut_annee + and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu + and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu ): df_filtered = df_other[ df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 ].copy() elif ( - selected_annee_onglet_3 == "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" + selected_annee_onglet_3 == valeur_par_defaut_annee + and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu + and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu ): df_filtered = df_other[ (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) ].copy() elif ( - selected_annee_onglet_3 != "Aucune sélection" - and selected_type_lieu_onglet_3 != "Aucune sélection" - and selected_type_milieu_onglet_3 == "Aucune sélection" + selected_annee_onglet_3 != valeur_par_defaut_annee + and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu + and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu ): df_filtered = df_other[ (df_other["ANNEE"] == selected_annee_onglet_3) & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) ].copy() elif ( - selected_annee_onglet_3 != "Aucune sélection" - and selected_type_lieu_onglet_3 == "Aucune sélection" - and selected_type_milieu_onglet_3 != "Aucune sélection" + selected_annee_onglet_3 != valeur_par_defaut_annee + and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu + and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu ): df_filtered = df_other[ (df_other["ANNEE"] == selected_annee_onglet_3) & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) ].copy() - elif selected_type_lieu_onglet_3 == "Aucune sélection": + elif selected_type_lieu_onglet_3 == 
valeur_par_defaut_lieu: df_filtered = df_other[ (df_other["ANNEE"] == selected_annee_onglet_3) & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) diff --git a/pyproject.toml b/pyproject.toml index 7d1d9cb..dfbe1dd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,14 +17,11 @@ python = "^3.10" # jupyter = "^1.0.0" # ipykernel = "^5.3.4" pandas = "^2.2.1" -dash = "^2.16.1" duckdb = "^0.10.1" geopandas = "^0.14.3" folium = "^0.16.0" -folium = "^0.16.0" streamlit = "^1.32.2" plotly-express = "^0.4.1" -streamlit-dynamic-filters = "^0.1.6" [tool.poetry.group.dev.dependencies] pre-commit = "^2.20.0" From 2ad8ba5d8ca340b4d9a0588128cda292063ffa51 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 22 May 2024 12:58:05 +0200 Subject: [PATCH 113/147] [tg] - convert liters to m3 --- dashboards/app/pages/data.py | 171 ++++++++++++++++++----------------- 1 file changed, 90 insertions(+), 81 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 600081f..c533ff7 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -109,10 +109,11 @@ def frenchify(x: int) -> str: df_volume = df_other.copy() # Calcul des indicateurs clés de haut de tableau avant transformation - volume_total = df_volume["VOLUME_TOTAL"].sum() + # Volume en litres dans la base, converti en m3 + volume_total_m3 = df_volume["VOLUME_TOTAL"].sum() / 1000 poids_total = df_volume["POIDS_TOTAL"].sum() - volume_total_categorise = df_volume[cols_volume].sum().sum() - pct_volume_categorise = volume_total_categorise / volume_total + volume_total_categorise_m3 = df_volume[cols_volume].sum().sum() / 1000 + pct_volume_categorise = volume_total_categorise_m3 / volume_total_m3 nb_collectes_int = len(df_volume) # estimation du poids categorisée en utilisant pct_volume_categorise @@ -168,7 +169,7 @@ def frenchify(x: int) -> str: # 1ère métrique : volume total de déchets collectés cell1 = 
l1_col1.container(border=True) # Trick pour séparer les milliers - cell1.metric("Volume de déchets collectés", frenchify(volume_total) + " litres") + cell1.metric("Volume de déchets collectés", frenchify(volume_total_m3) + " m³") # 2ème métrique : poids cell2 = l1_col2.container(border=True) @@ -176,7 +177,7 @@ def frenchify(x: int) -> str: # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de collectes comptabilisées", frenchify(nb_collectes_int)) + cell3.metric("Nombre de collectes", frenchify(nb_collectes_int)) # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int == 1: @@ -194,70 +195,72 @@ def frenchify(x: int) -> str: # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux - l2_col1, l2_col2 = st.columns(2) - cell4 = l2_col1.container(border=True) - cell5 = l2_col2.container(border=True) - with cell4: - - # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance - fig = px.pie( - df_totals_sorted, - values="Volume", - names="Matériau", - title="Répartition des matériaux en volume", - hole=0.4, - color="Matériau", - color_discrete_map=colors_map, - ) + with st.container(border=True): - # Réglage du texte affiché, format et taille de police - fig.update_traces( - textinfo="percent", - texttemplate="%{percent:.0%}", - textfont_size=14, - ) - fig.update_layout(autosize=True, legend_title_text="Matériau") - - # Affichage du graphique - st.plotly_chart(fig, use_container_width=True) - - with cell5: - # Création du graphique en barres avec Plotly Express - fig2 = px.bar( - df_totals_sorted, - x="Matériau", - y="Volume", - text="Volume", - title="Volume total par materiau (en litres)", - color="Matériau", - color_discrete_map=colors_map, - ) + cell4, cell5 = st.columns(2) + + with cell4: + + # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance + fig = px.pie( + df_totals_sorted, + values="Volume", + 
names="Matériau", + title="Répartition des matériaux en volume", + hole=0.4, + color="Matériau", + color_discrete_map=colors_map, + ) + + # Réglage du texte affiché, format et taille de police + fig.update_traces( + textinfo="percent", + texttemplate="%{percent:.0%}", + textfont_size=14, + ) + fig.update_layout(autosize=True, legend_title_text="Matériau") + + # Affichage du graphique + st.plotly_chart(fig, use_container_width=True) + + with cell5: + # Conversion des volumes en m3 + df_totals_sorted["Volume_m3"] = df_totals_sorted["Volume"] / 1000 + # Création du graphique en barres avec Plotly Express + fig2 = px.bar( + df_totals_sorted, + x="Matériau", + y="Volume_m3", + text="Volume_m3", + title="Volume total par materiau (en m³)", + color="Matériau", + color_discrete_map=colors_map, + ) + + # Amélioration du graphique + fig2.update_traces( + texttemplate="%{text:.2s}", + textposition="inside", + textfont_size=14, + ) + fig2.update_layout( + autosize=True, + # uniformtext_minsize=8, + uniformtext_mode="hide", + xaxis_tickangle=-45, + showlegend=False, + yaxis_showgrid=False, + xaxis_title=None, + yaxis_title=None, + ) + + # Affichage du graphique + st.plotly_chart(fig2, use_container_width=True) - # Amélioration du graphique - fig2.update_traces( - texttemplate="%{text:.2s}", - textposition="inside", - textfont_size=14, - ) - fig2.update_layout( - autosize=True, - # uniformtext_minsize=8, - uniformtext_mode="hide", - xaxis_tickangle=-45, - showlegend=False, - yaxis_showgrid=False, - xaxis_title=None, - yaxis_title=None, + st.caption( + f"NB : Ces données prennent en compte uniquement les déchets dont le matériau a été identifié, soit {pct_volume_categorise:.0%} du volume total." 
) - # Affichage du graphique - st.plotly_chart(fig2, use_container_width=True) - - st.write("") - st.caption( - f"Note : Cette analyse se base sur les déchets qui ont pu être classés par matériau : {volume_total_categorise:.0f} Litres, soit {pct_volume_categorise:.0%} du volume total collecté." - ) - # Ligne 3 : Graphe par milieu de collecte # Grouper par année et type de matériau @@ -306,6 +309,10 @@ def frenchify(x: int) -> str: with st.container(border=True): st.plotly_chart(fig3, use_container_width=True) + st.caption( + f"NB : Ces données prennent en compte uniquement les déchets dont le matériau a été identifié, soit {pct_volume_categorise:.0%} du volume total." + ) + # Ligne 3 : Graphe par milieu , lieu et année st.write("**Filtrer les données par année, type de milieu ou type de lieu**") @@ -462,10 +469,11 @@ def frenchify(x: int) -> str: cell8 = l5_col3.container(border=True) poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() - volume_total_filtered = df_filtered_metrics["VOLUME_TOTAL"].sum() + # Volume litres converti en m3 + volume_total_filtered_m3 = df_filtered_metrics["VOLUME_TOTAL"].sum() / 1000 cell6.metric( - "Volume de déchets collectés", frenchify(volume_total_filtered) + " litres" + "Volume de déchets collectés", frenchify(volume_total_filtered_m3) + " m³" ) cell7.metric("Poids total collecté", frenchify(poids_total_filtered) + " kg") @@ -512,20 +520,18 @@ def frenchify(x: int) -> str: "Volume" ].sum() df_totals_sorted2 = df_totals_sorted2.sort_values(["Volume"], ascending=False) - df_totals_sorted2["Volume_"] = ( - df_totals_sorted2["Volume"] - .apply(lambda x: "{0:,.0f}".format(x)) - .replace(",", " ") - ) + # Conversion litres en m + df_totals_sorted2["Volume_m3"] = df_totals_sorted2["Volume"] / 1000 # Étape 4: Création du Graphique if not df_filtered.empty: + fig4 = px.treemap( df_totals_sorted2, path=["Matériau"], - values="Volume", - title="Répartition des matériaux en volume dans le milieu ou le lieu choisi", + 
values="Volume_m3", + title="Répartition des matériaux en volume", color="Matériau", color_discrete_map=colors_map, ) @@ -534,14 +540,18 @@ def frenchify(x: int) -> str: ) fig4.update_traces( textinfo="label+value", - texttemplate="%{label}
%{value:.0f} litres", + texttemplate="%{label}
%{value:.1f} m³", textfont_size=16, - hovertemplate="%{label}
Volume: %{value:.0f}", + hovertemplate="Matériau : %{label}
Volume = %{value:.1f} m³", ) with st.container(border=True): st.plotly_chart(fig4, use_container_width=True) + st.caption( + f"NB : Ces données prennent en compte uniquement les déchets dont le matériau a été identifié, soit {pct_volume_categorise:.0%} du volume total." + ) + else: st.write("Aucune donnée à afficher pour les filtres sélectionnés.") @@ -564,14 +574,13 @@ def frenchify(x: int) -> str: cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers - # volume_total_categorise = f"{volume_total_categorise:,.0f}".replace(",", " ") cell1.metric("Nombre de déchets catégorisés", frenchify(nb_total_dechets)) # 2ème métrique : équivalent volume catégorisé cell2 = l1_col2.container(border=True) cell2.metric( "Equivalent en volume ", - frenchify(volume_total_categorise) + " litres", + frenchify(volume_total_categorise_m3) + " m³", ) # 3ème métrique : nombre de relevés @@ -657,7 +666,7 @@ def frenchify(x: int) -> str: st.write("") st.caption( f"Note : Analyse basée sur les collectes qui ont fait l'objet d'un comptage détaillé par déchet,\ - soit {volume_total_categorise} litres équivalent à {pct_volume_categorise:.0%} du volume collecté\ + soit {volume_total_categorise_m3} m³ équivalent à {pct_volume_categorise:.0%} du volume collecté\ sur le territoire." ) with st.container(): @@ -1108,9 +1117,9 @@ def frenchify(x: int) -> str: ) figreptree.update_traces( textinfo="label+value", - texttemplate="%{label}
%{value:.0f} litres", + texttemplate="%{label}
%{value:.0f} items", textfont=dict(size=16), - hovertemplate="%{label}
Volume: %{value:.0f}", + hovertemplate="%{label}
Nombre de déchets : %{value:.0f}", ) with st.container(border=True): From 47049afcc3f48979a3a1a68ff8667fe0a1dd47e1 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 22 May 2024 13:15:31 +0200 Subject: [PATCH 114/147] =?UTF-8?q?[tg]=20-=20wording=20sous=20onglet=20ma?= =?UTF-8?q?t=C3=A9riaux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 74 +++++++++++++++--------------------- 1 file changed, 30 insertions(+), 44 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index c533ff7..86359e7 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -177,25 +177,29 @@ def frenchify(x: int) -> str: # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de collectes", frenchify(nb_collectes_int)) + cell3.metric("Nombre de ramassages", frenchify(nb_collectes_int)) # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(nb_collectes_int) - + " collecte considérées dans les données présentées." - ) - elif nb_collectes_int <= 5: + if nb_collectes_int <= 5: st.warning( - "⚠️ Il n'y a que " + "⚠️ Faible nombre de ramassages (" + str(nb_collectes_int) - + " collectes considérées dans les données présentées." + + ") dans la base de données." ) + st.caption( + f"Note : Il n’y a pas de correspondance entre le poids et le volume global\ + de déchets indiqués car certaines organisations \ + ne renseignent que le volume sans mention de poids \ + (protocole de niveau 1) ou inversement. De plus, \ + les chiffres ci-dessous sont calculés sur XX ramassages \ + ayant fait l’objet d’une estimation des volumes \ + par matériau, soit un volume total de {volume_total_categorise_m3:.0f} m³." 
+ ) + # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux - with st.container(border=True): + with st.container(border=False): cell4, cell5 = st.columns(2) @@ -257,10 +261,6 @@ def frenchify(x: int) -> str: # Affichage du graphique st.plotly_chart(fig2, use_container_width=True) - st.caption( - f"NB : Ces données prennent en compte uniquement les déchets dont le matériau a été identifié, soit {pct_volume_categorise:.0%} du volume total." - ) - # Ligne 3 : Graphe par milieu de collecte # Grouper par année et type de matériau @@ -306,13 +306,9 @@ def frenchify(x: int) -> str: ) # Afficher le graphique - with st.container(border=True): + with st.container(border=False): st.plotly_chart(fig3, use_container_width=True) - st.caption( - f"NB : Ces données prennent en compte uniquement les déchets dont le matériau a été identifié, soit {pct_volume_categorise:.0%} du volume total." - ) - # Ligne 3 : Graphe par milieu , lieu et année st.write("**Filtrer les données par année, type de milieu ou type de lieu**") @@ -479,12 +475,12 @@ def frenchify(x: int) -> str: cell7.metric("Poids total collecté", frenchify(poids_total_filtered) + " kg") nombre_collectes_filtered = len(df_filtered) - cell8.metric("Nombre de collectes", frenchify(nombre_collectes_filtered)) + cell8.metric("Nombre de ramassages", frenchify(nombre_collectes_filtered)) # Message d'avertissement nb de collectes en dessous de 5 if len(df_filtered) <= 5: st.warning( - "⚠️ Faible nombre de collectes disponibles dans la base de données : " + "⚠️ Faible nombre de ramassages disponibles dans la base de données : " + str(len(df_filtered)) ) @@ -531,7 +527,7 @@ def frenchify(x: int) -> str: df_totals_sorted2, path=["Matériau"], values="Volume_m3", - title="Répartition des matériaux en volume", + title="Répartition des matériaux en volume (données filtrées)", color="Matériau", color_discrete_map=colors_map, ) @@ -545,13 +541,9 @@ def frenchify(x: int) -> str: hovertemplate="Matériau : %{label}
Volume = %{value:.1f} m³", ) - with st.container(border=True): + with st.container(border=False): st.plotly_chart(fig4, use_container_width=True) - st.caption( - f"NB : Ces données prennent en compte uniquement les déchets dont le matériau a été identifié, soit {pct_volume_categorise:.0%} du volume total." - ) - else: st.write("Aucune donnée à afficher pour les filtres sélectionnés.") @@ -585,12 +577,12 @@ def frenchify(x: int) -> str: # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de collectes comptabilisées", frenchify(nb_collectes_int)) + cell3.metric("Nombre de ramassages", frenchify(nb_collectes_int)) # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int <= 5: st.warning( - "⚠️ Le nombre de collectes " + "⚠️ Le nombre de ramassages " + str(nb_collectes_int) + " est trop faible pour l'analyse." ) @@ -665,7 +657,7 @@ def frenchify(x: int) -> str: st.write("") st.caption( - f"Note : Analyse basée sur les collectes qui ont fait l'objet d'un comptage détaillé par déchet,\ + f"Note : Analyse basée sur les ramassages qui ont fait l'objet d'un comptage détaillé par déchet,\ soit {volume_total_categorise_m3} m³ équivalent à {pct_volume_categorise:.0%} du volume collecté\ sur le territoire." ) @@ -931,22 +923,16 @@ def frenchify(x: int) -> str: # 3ème métrique : nombre de collectes cell3 = l1_col3.container(border=True) cell3.metric( - "Nombre de collectes comptabilisées", - frenchify(collectes) + " collectes", + "Nombre de ramassages", + frenchify(collectes), ) # Message d'avertissement nb de collectes en dessous de 5 - if collectes == 1: - st.warning( - "⚠️ Il n'y a qu' " - + str(collectes) - + " collecte considérées dans les données présentées." - ) - elif collectes <= 5: + if collectes <= 5: st.warning( - "⚠️ Il n'y a que " + "⚠️ Faible nombre de ramassages (" + str(collectes) - + " collectes considérées dans les données présentées." + + ") dans la base de données." 
) # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page @@ -1031,7 +1017,7 @@ def frenchify(x: int) -> str: # 2ème métrique : nombre de marques identifiées lors des collectes cell5 = l2_col2.container(border=True) cell5.metric( - "Nombre de marques identifiées lors des collectes", + "Nombre de marques identifiées", frenchify(nb_marques) + " marques", ) @@ -1099,7 +1085,7 @@ def frenchify(x: int) -> str: # 2ème métrique : nombre de responsabilités cell7 = l3_col2.container(border=True) cell7.metric( - "Nombre de filières REP identifiées lors des collectes", + "Nombre de filières REP identifiées", frenchify(nb_rep) + " filières", ) From 1c9d0376981a041563d300f29fd2c520bd17c91f Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 22 May 2024 16:37:20 +0200 Subject: [PATCH 115/147] =?UTF-8?q?[tg]=20-=20classement=20par=20ordre=20a?= =?UTF-8?q?lphab=C3=A9tique=20dans=20les=20filtres?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 86359e7..30c8bb6 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -338,14 +338,16 @@ def frenchify(x: int) -> str: filtered_data_milieu = df_other.copy() filtered_metrics_milieu = df_other_metrics.copy() - # Filtre par milieu - + ## Filtre par milieu + # Initialiser le champ déroulant avec une valeur par défaut valeur_par_defaut_milieu = "Tous les milieux" + milieux_liste = [valeur_par_defaut_milieu] + sorted( + list(filtered_data_milieu["TYPE_MILIEU"].unique()) + ) selected_type_milieu = st.selectbox( "Choisir un type de milieu:", - options=[valeur_par_defaut_milieu] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + options=milieux_liste, ) if selected_type_milieu != valeur_par_defaut_milieu: @@ 
-362,11 +364,13 @@ def frenchify(x: int) -> str: # Filtre par type de lieu valeur_par_defaut_lieu = "Tous les lieux" + lieux_liste = [valeur_par_defaut_lieu] + sorted( + list(filtered_data_lieu["TYPE_LIEU"].unique()) + ) selected_type_lieu = st.selectbox( "Choisir un type de lieu:", - options=[valeur_par_defaut_lieu] - + list(filtered_data_lieu["TYPE_LIEU"].unique()), + options=lieux_liste, ) if ( @@ -749,11 +753,15 @@ def frenchify(x: int) -> str: else: filtered_data_milieu = df_other.copy() - # Filtre par type de milieu + ## Filtre par type de milieu + # Initialiser la liste des lieux + milieux_liste = [valeur_par_defaut_milieu] + sorted( + list(filtered_data_milieu["TYPE_MILIEU"].unique()) + ) + selected_type_milieu_onglet_3 = st.selectbox( "Choisir un type de milieu:", - options=[valeur_par_defaut_milieu] - + list(filtered_data_milieu["TYPE_MILIEU"].unique()), + options=milieux_liste, key="type_milieu_select", ) @@ -764,11 +772,15 @@ def frenchify(x: int) -> str: else: filtered_data_lieu = filtered_data_milieu - # Filtre par lieu + ## Filtre par lieu + # Initialiser la liste des lieux + lieux_liste = [valeur_par_defaut_lieu] + sorted( + list(filtered_data_lieu["TYPE_LIEU"].unique()) + ) + selected_type_lieu_onglet_3 = st.selectbox( "Choisir un type de lieu:", - options=[valeur_par_defaut_lieu] - + list(filtered_data_lieu["TYPE_LIEU"].unique()), + options=lieux_liste, key="type_lieu_select", ) From b941905c6f60b56bad37a845b8416f521885a06e Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 22 May 2024 17:42:05 +0200 Subject: [PATCH 116/147] =?UTF-8?q?[tg]=20-=20wording=20sous-onglet=20top?= =?UTF-8?q?=20d=C3=A9chets?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 30c8bb6..331228a 100644 
--- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -564,24 +564,17 @@ def frenchify(x: int) -> str: ].sum() # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) + l1_col1, l1_col2 = st.columns(2) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) # 1ère métrique : volume total de déchets collectés cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers - cell1.metric("Nombre de déchets catégorisés", frenchify(nb_total_dechets)) - - # 2ème métrique : équivalent volume catégorisé - cell2 = l1_col2.container(border=True) - cell2.metric( - "Equivalent en volume ", - frenchify(volume_total_categorise_m3) + " m³", - ) + cell1.metric("Nombre de déchets comptés", frenchify(nb_total_dechets)) # 3ème métrique : nombre de relevés - cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de ramassages", frenchify(nb_collectes_int)) + cell2 = l1_col2.container(border=True) + cell2.metric("Nombre de ramassages", frenchify(nb_collectes_int)) # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int <= 5: From c19f74bcff7a8b834ccf290b9dc782efb0b5c540 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Thu, 23 May 2024 11:05:06 +0200 Subject: [PATCH 117/147] =?UTF-8?q?[tg]=20-=20top=20d=C3=A9chets=20:=20pas?= =?UTF-8?q?sage=20rayon=20en=20log2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 331228a..1f4748e 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -3,6 +3,7 @@ import plotly.express as px import folium from folium import IFrame +import math # Configuration de la page st.set_page_config( 
@@ -701,10 +702,16 @@ def frenchify(x: int) -> str: for index, row in df_map_data.iterrows(): # Application de la normalisation - radius = row["nb_dechet"] / normalisation_facteur + # radius = row["nb_dechet"] / normalisation_facteur # Application d'une limite minimale pour le rayon si nécessaire - radius = max(radius, 5) + # radius = max(radius, 5) + + # Calcul du rayon du marqueur en log base 2 pour réduire les écarts + if row["nb_dechet"] > 1: + radius = math.log2(row["nb_dechet"]) + else: + radius = 0.001 folium.CircleMarker( location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), From 55e489488aa23a9bdc5057026283559cc0466364 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Thu, 23 May 2024 18:26:35 +0200 Subject: [PATCH 118/147] [tg] - chart labels templates --- dashboards/app/pages/data.py | 44 +++++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 1f4748e..9282d2a 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -138,6 +138,9 @@ def frenchify(x: int) -> str: "Volume" ].sum() df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) + # Conversion des volumes en m3. 
Conserver la colonne Volume initiale (Litres) + df_totals_sorted["Volume_m3"] = df_totals_sorted["Volume"] / 1000 + # replace "Verre" with "Verre/Céramique" in df_totals_sorted df_totals_sorted["Matériau"] = df_totals_sorted["Matériau"].replace( "Verre", "Verre/Céramique" @@ -209,7 +212,7 @@ def frenchify(x: int) -> str: # Création du diagramme en donut en utilisant le dictionnaire de couleurs pour la correspondance fig = px.pie( df_totals_sorted, - values="Volume", + values="Volume_m3", names="Matériau", title="Répartition des matériaux en volume", hole=0.4, @@ -223,21 +226,27 @@ def frenchify(x: int) -> str: texttemplate="%{percent:.0%}", textfont_size=14, ) + + # Paramétrage de l'étiquette flottante + fig.update_traces( + hovertemplate="%{label} : %{value:.1f} m³" + + "
%{percent:.1%} du volume total" + ) + fig.update_layout(autosize=True, legend_title_text="Matériau") # Affichage du graphique st.plotly_chart(fig, use_container_width=True) with cell5: - # Conversion des volumes en m3 - df_totals_sorted["Volume_m3"] = df_totals_sorted["Volume"] / 1000 + # Création du graphique en barres avec Plotly Express fig2 = px.bar( df_totals_sorted, x="Matériau", y="Volume_m3", text="Volume_m3", - title="Volume total par materiau (en m³)", + title="Volume total par materiau (m³)", color="Matériau", color_discrete_map=colors_map, ) @@ -248,6 +257,10 @@ def frenchify(x: int) -> str: textposition="inside", textfont_size=14, ) + + # Paramétrage de l'étiquette flottante + fig2.update_traces(hovertemplate="%{label}: %{value:.1f} m³") + fig2.update_layout( autosize=True, # uniformtext_minsize=8, @@ -271,6 +284,8 @@ def frenchify(x: int) -> str: df_typemilieu = df_typemilieu.sort_values( ["TYPE_MILIEU", "Volume"], ascending=False ) + # Conversion litres en m + df_typemilieu["Volume_m3"] = df_typemilieu["Volume"] / 1000 # Raccourcir les étiquettes trop longues df_typemilieu = df_typemilieu.replace( @@ -283,10 +298,10 @@ def frenchify(x: int) -> str: fig3 = px.histogram( df_typemilieu, x="TYPE_MILIEU", - y="Volume", + y="Volume_m3", color="Matériau", barnorm="percent", - title="Proportion de chaque matériau en volume selon le milieu de collecte", + title="Proportion de matériaux ramassés en fonction du milieu", color_discrete_map=colors_map, text_auto=True, ) @@ -294,7 +309,7 @@ def frenchify(x: int) -> str: fig3.update_layout( bargap=0.2, height=600, - yaxis_title="Proportion du volume collecté (en %)", + yaxis_title="Proportion du volume ramassé (en %)", xaxis_title=None, ) fig3.update_xaxes(tickangle=-30) @@ -302,9 +317,15 @@ def frenchify(x: int) -> str: fig3.update_traces( texttemplate="%{y:.0f}%", textposition="inside", - hovertemplate="%{x}
Part du volume collecté dans ce milieu: %{y:.0f} %", textfont_size=12, ) + # Paramétrer l'étiquette flottante + fig3.update_traces( + hovertemplate="Ce matériau représente
" + + "%{y:.1f} % " + + "du volume ramassé
dans " + + "le milieu %{x}." + ) # Afficher le graphique with st.container(border=False): @@ -540,10 +561,11 @@ def frenchify(x: int) -> str: margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 ) fig4.update_traces( - textinfo="label+value", - texttemplate="%{label}
%{value:.1f} m³", + textinfo="label+value+percent root", + texttemplate="%{label}
%{value:.1f} m³
%{percentRoot}", textfont_size=16, - hovertemplate="Matériau : %{label}
Volume = %{value:.1f} m³", + hovertemplate="%{label} : %{value:.1f} m³ " + + "
%{percentRoot:.1%} % du volume total.", ) with st.container(border=False): From 5c1773260134b08c5c2356027dc58cb66b444054 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Fri, 24 May 2024 10:34:37 +0200 Subject: [PATCH 119/147] =?UTF-8?q?[tg]=20-=20am=C3=A9lioration=20format?= =?UTF-8?q?=20nombres=20utilisant=20locale.set=5Flocale?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 86 ++++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 34 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 9282d2a..a58aaf4 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -4,12 +4,18 @@ import folium from folium import IFrame import math +import locale + # Configuration de la page st.set_page_config( layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Data" ) +# Définir les paramètres linguistiques FR pour l'affichage des nombres +locale.setlocale(locale.LC_NUMERIC, "fr_FR") +st.write(locale.format("%.2f", 1000, grouping=True)) + # Session state session_state = st.session_state @@ -60,22 +66,25 @@ def load_df_dict_corr_dechet_materiau(): df_other_metrics_raw = df_other.copy() # Fonction pour améliorer l'affichage des nombres (milliers, millions, milliards) - def frenchify(x: int) -> str: + def french_format(x: int) -> str: if x > 1e9: y = x / 1e9 - return f"{y:,.2f} milliards".replace(".", ",") + y = locale.format("%.2f", y, grouping=True) + return f"{y} milliards" if x > 1e6: y = x / 1e6 - return f"{y:,.2f} millions".replace(".", ",") + y = locale.format("%.2f", y, grouping=True) + return f"{y} millions" else: - return f"{x:,.0f}".replace(",", " ") + y = locale.format("%.0f", x, grouping=True) + return f"{y}" # 3 Onglets : Matériaux, Top déchets, Filières et marques tab1, tab2, tab3 = st.tabs( [ - "Matériaux :wood:", - "Top Déchets :wastebasket:", - 
"Secteurs, marques et filières REP :womans_clothes:", + "**Matériaux :wood:**", + "**Top Déchets :wastebasket:**", + "**Secteurs économiques, filières et marques :womans_clothes:**", ] ) @@ -173,15 +182,17 @@ def frenchify(x: int) -> str: # 1ère métrique : volume total de déchets collectés cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers - cell1.metric("Volume de déchets collectés", frenchify(volume_total_m3) + " m³") + cell1.metric( + "Volume de déchets collectés", french_format(volume_total_m3) + " m³" + ) # 2ème métrique : poids cell2 = l1_col2.container(border=True) - cell2.metric("Poids total collecté", frenchify(poids_total) + " kg") + cell2.metric("Poids total collecté", french_format(poids_total) + " kg") # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de ramassages", frenchify(nb_collectes_int)) + cell3.metric("Nombre de ramassages", french_format(nb_collectes_int)) # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int <= 5: @@ -198,7 +209,7 @@ def frenchify(x: int) -> str: (protocole de niveau 1) ou inversement. De plus, \ les chiffres ci-dessous sont calculés sur XX ramassages \ ayant fait l’objet d’une estimation des volumes \ - par matériau, soit un volume total de {volume_total_categorise_m3:.0f} m³." + par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³." 
) # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux @@ -235,8 +246,11 @@ def frenchify(x: int) -> str: fig.update_layout(autosize=True, legend_title_text="Matériau") + # Définir affichage français pour les nombres + config = {"locale": "fr"} + # Affichage du graphique - st.plotly_chart(fig, use_container_width=True) + st.plotly_chart(fig, use_container_width=True, config=config) with cell5: @@ -284,7 +298,7 @@ def frenchify(x: int) -> str: df_typemilieu = df_typemilieu.sort_values( ["TYPE_MILIEU", "Volume"], ascending=False ) - # Conversion litres en m + # Conversion litres en m3 df_typemilieu["Volume_m3"] = df_typemilieu["Volume"] / 1000 # Raccourcir les étiquettes trop longues @@ -495,13 +509,16 @@ def frenchify(x: int) -> str: volume_total_filtered_m3 = df_filtered_metrics["VOLUME_TOTAL"].sum() / 1000 cell6.metric( - "Volume de déchets collectés", frenchify(volume_total_filtered_m3) + " m³" + "Volume de déchets collectés", + french_format(volume_total_filtered_m3) + " m³", ) - cell7.metric("Poids total collecté", frenchify(poids_total_filtered) + " kg") + cell7.metric( + "Poids total collecté", french_format(poids_total_filtered) + " kg" + ) nombre_collectes_filtered = len(df_filtered) - cell8.metric("Nombre de ramassages", frenchify(nombre_collectes_filtered)) + cell8.metric("Nombre de ramassages", french_format(nombre_collectes_filtered)) # Message d'avertissement nb de collectes en dessous de 5 if len(df_filtered) <= 5: @@ -593,11 +610,11 @@ def frenchify(x: int) -> str: cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers - cell1.metric("Nombre de déchets comptés", frenchify(nb_total_dechets)) + cell1.metric("Nombre de déchets comptés", french_format(nb_total_dechets)) # 3ème métrique : nombre de relevés cell2 = l1_col2.container(border=True) - cell2.metric("Nombre de ramassages", frenchify(nb_collectes_int)) + cell2.metric("Nombre de ramassages", french_format(nb_collectes_int)) # Message d'avertissement nb de collectes en 
dessous de 5 if nb_collectes_int <= 5: @@ -677,10 +694,11 @@ def frenchify(x: int) -> str: st.write("") st.caption( - f"Note : Analyse basée sur les ramassages qui ont fait l'objet d'un comptage détaillé par déchet,\ - soit {volume_total_categorise_m3} m³ équivalent à {pct_volume_categorise:.0%} du volume collecté\ - sur le territoire." + f"Note : Les chiffres ci-dessous sont calculés sur XX ramassages \ + ayant fait l’objet d’une estimation des volumes \ + par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³." ) + with st.container(): # Ajout de la selectbox selected_dechet = st.selectbox( @@ -944,21 +962,21 @@ def frenchify(x: int) -> str: # Trick pour séparer les milliers cell1.metric( - "Nombre de déchets avec secteur identifié", frenchify(nb_dechet_secteur) + "Nombre de déchets triés par secteur", french_format(nb_dechet_secteur) ) # 2ème métrique : poids cell2 = l1_col2.container(border=True) cell2.metric( - "Nombre de secteurs identifiés dans les déchets collectés", - frenchify(nb_secteurs) + " secteurs", + "Nombre de secteurs concernés", + french_format(nb_secteurs) + " secteurs", ) # 3ème métrique : nombre de collectes cell3 = l1_col3.container(border=True) cell3.metric( "Nombre de ramassages", - frenchify(collectes), + french_format(collectes), ) # Message d'avertissement nb de collectes en dessous de 5 @@ -1033,7 +1051,7 @@ def frenchify(x: int) -> str: if nb_vide_indetermine != 0: st.warning( "⚠️ Il y a " - + str(frenchify(nb_vide_indetermine)) + + str(french_format(nb_vide_indetermine)) + " déchets dont le secteur n'a pas été determiné dans les déchets collectés." 
) @@ -1044,15 +1062,15 @@ def frenchify(x: int) -> str: # 1er métrique : nombre de dechets categorises par marques cell4.metric( - "Nombre de déchets dont la marque est identifiée", - frenchify(nb_dechet_marque) + " déchets", + "Nombre de déchets triés par marque", + french_format(nb_dechet_marque) + " déchets", ) # 2ème métrique : nombre de marques identifiées lors des collectes cell5 = l2_col2.container(border=True) cell5.metric( - "Nombre de marques identifiées", - frenchify(nb_marques) + " marques", + "Nombre de marques concernées", + french_format(nb_marques) + " marques", ) fig_marque = px.bar( @@ -1112,15 +1130,15 @@ def frenchify(x: int) -> str: # 1ère métrique : nombre de dechets catégorisés repartis par responsabilités cell6 = l3_col1.container(border=True) cell6.metric( - "Nombre de déchets catégorisés par filière REP", - frenchify(nb_dechet_rep), + "Quan de déchets catégorisés par filière REP", + french_format(nb_dechet_rep), ) # 2ème métrique : nombre de responsabilités cell7 = l3_col2.container(border=True) cell7.metric( "Nombre de filières REP identifiées", - frenchify(nb_rep) + " filières", + french_format(nb_rep) + " filières", ) # Treemap REP @@ -1149,7 +1167,7 @@ def frenchify(x: int) -> str: if nb_vide_rep != 0: st.warning( "⚠️ Il y a " - + str(frenchify(nb_vide_rep)) + + str(french_format(nb_vide_rep)) + " déchets dont la filière REP n'a pas été determinée dans les déchets collectés." 
) From 042e13377cf9a9019d958980991caa1150560a9c Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Fri, 24 May 2024 10:35:56 +0200 Subject: [PATCH 120/147] =?UTF-8?q?[tg]=20-=20bug=20corrig=C3=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 1 - 1 file changed, 1 deletion(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index a58aaf4..e5cf9f4 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -14,7 +14,6 @@ # Définir les paramètres linguistiques FR pour l'affichage des nombres locale.setlocale(locale.LC_NUMERIC, "fr_FR") -st.write(locale.format("%.2f", 1000, grouping=True)) # Session state session_state = st.session_state From f22464502df2641e4efd6d52f0d008c2e0b43fbf Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Fri, 24 May 2024 18:03:40 +0200 Subject: [PATCH 121/147] =?UTF-8?q?[tg]=20-=20note=20m=C3=A9thodo=20et=20e?= =?UTF-8?q?xclusion=20des=20100%Autres?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 76 +++++++++++++++++++++++++++++------- 1 file changed, 62 insertions(+), 14 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index e5cf9f4..2453344 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -61,6 +61,17 @@ def load_df_dict_corr_dechet_materiau(): df_other = st.session_state["df_other_filtre"].copy() df_nb_dechet = st.session_state["df_nb_dechets_filtre"].copy() + # Exclusion des ramassages de niveau 0 ou avec 100% de AUTRES + def carac_exclusions(df): + if df["NIVEAU_CARAC"] == 0: + return "Exclu - niveau 0" + elif df["GLOBAL_VOLUME_AUTRE"] == df["VOLUME_TOTAL"]: + return "Exclu - 100% Autre" + else: + return "Inclus" + + df_other["Exclusions"] = df_other.apply(lambda row: 
carac_exclusions(row), axis=1) + # Copier le df pour la partie filtrée par milieu/lieu/année df_other_metrics_raw = df_other.copy() @@ -117,19 +128,25 @@ def french_format(x: int) -> str: # Copie des données pour transfo df_volume = df_other.copy() + # Retrait des lignes avec 100% de volume catégorisé en AUTRE + df_volume_cleaned = df_volume[df_volume["Exclusions"] == "Inclus"] + # Calcul des indicateurs clés de haut de tableau avant transformation # Volume en litres dans la base, converti en m3 volume_total_m3 = df_volume["VOLUME_TOTAL"].sum() / 1000 poids_total = df_volume["POIDS_TOTAL"].sum() - volume_total_categorise_m3 = df_volume[cols_volume].sum().sum() / 1000 + volume_total_categorise_m3 = df_volume_cleaned[cols_volume].sum().sum() / 1000 pct_volume_categorise = volume_total_categorise_m3 / volume_total_m3 - nb_collectes_int = len(df_volume) + # Nb total de collecte incluant les 100% autres et les relevés de niveau 0 + nb_collectes_int = df_volume["ID_RELEVE"].nunique() + # Nb de collectes excluant les 100% autres et les relevés de niveau 0 + nb_collectes_carac = df_volume_cleaned["ID_RELEVE"].nunique() # estimation du poids categorisée en utilisant pct_volume_categorise poids_total_categorise = round(poids_total * pct_volume_categorise) # Dépivotage du tableau pour avoir une base de données exploitable - df_volume = df_volume.melt( + df_volume_cleaned = df_volume_cleaned.melt( id_vars=cols_identifiers, value_vars=cols_volume, var_name="Matériau", @@ -137,12 +154,12 @@ def french_format(x: int) -> str: ) # Nettoyer le nom du Type déchet pour le rendre plus lisible - df_volume["Matériau"] = ( - df_volume["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() + df_volume_cleaned["Matériau"] = ( + df_volume_cleaned["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() ) # Grouper par type de matériau pour les visualisations - df_totals_sorted = df_volume.groupby(["Matériau"], as_index=False)[ + df_totals_sorted = df_volume_cleaned.groupby(["Matériau"], 
as_index=False)[ "Volume" ].sum() df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) @@ -201,15 +218,43 @@ def french_format(x: int) -> str: + ") dans la base de données." ) - st.caption( - f"Note : Il n’y a pas de correspondance entre le poids et le volume global\ + # Note méthodo pour expliquer les données retenues pour l'analyse + with st.expander( + "Note sur les données utilisées dans les graphiques ci-dessous" + ): + st.caption( + f"Il n’y a pas de correspondance entre le poids et le volume global\ de déchets indiqués car certaines organisations \ ne renseignent que le volume sans mention de poids \ (protocole de niveau 1) ou inversement. De plus, \ - les chiffres ci-dessous sont calculés sur XX ramassages \ + les chiffres ci-dessous sont calculés sur **{french_format(nb_collectes_carac)}** ramassages \ ayant fait l’objet d’une estimation des volumes \ - par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³." - ) + par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³.\ + Les relevés de niveau 0 et les relevés comptabilisant 100% de déchets 'AUTRES' ont été exclus." + ) + df_note_methodo = df_volume.groupby(["Exclusions"], as_index=False)[ + "ID_RELEVE" + ].count() + fig_data = px.pie( + df_note_methodo, + values="ID_RELEVE", + names="Exclusions", + title="Nombre de ramassages inclus ou exclus dans les analyses ci-dessous", + color="Exclusions", + color_discrete_sequence=px.colors.sequential.RdBu, + ) + # Réglage du texte affiché, format et taille de police + fig_data.update_traces( + textinfo="value+percent+label", + texttemplate="%{label}
%{value:.0f} relevés
%{percent:.0%}", + textfont_size=14, + hoverinfo=None, + insidetextorientation="horizontal", + rotation=90, + ) + fig_data.update_layout(showlegend=False) + + st.plotly_chart(fig_data) # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux @@ -291,9 +336,9 @@ def french_format(x: int) -> str: # Ligne 3 : Graphe par milieu de collecte # Grouper par année et type de matériau - df_typemilieu = df_volume.groupby(["TYPE_MILIEU", "Matériau"], as_index=False)[ - "Volume" - ].sum() + df_typemilieu = df_volume_cleaned.groupby( + ["TYPE_MILIEU", "Matériau"], as_index=False + )["Volume"].sum() df_typemilieu = df_typemilieu.sort_values( ["TYPE_MILIEU", "Volume"], ascending=False ) @@ -530,6 +575,9 @@ def french_format(x: int) -> str: # Copie des données pour transfo df_volume2 = df_filtered.copy() + # Retrait des lignes avec 100% de volume catégorisé en AUTRE + df_volume2 = df_volume2[df_volume2["Exclusions"] == "Inclus"] + # Calcul des indicateurs clés de haut de tableau avant transformation volume2_total = df_volume2["VOLUME_TOTAL"].sum() poids2_total = df_volume2["POIDS_TOTAL"].sum() From e1c7a800a8382e72800460b229f99cf37e824b92 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Mon, 27 May 2024 16:13:29 +0200 Subject: [PATCH 122/147] [tg} - infobulles dans les graphiques en langage naturel --- dashboards/app/pages/data.py | 37 +++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 2453344..123a655 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -629,7 +629,7 @@ def french_format(x: int) -> str: texttemplate="%{label}
%{value:.1f} m³
%{percentRoot}", textfont_size=16, hovertemplate="%{label} : %{value:.1f} m³ " - + "
%{percentRoot:.1%} % du volume total.", + + "
%{percentRoot:.1%} du volume total", ) with st.container(border=False): @@ -692,6 +692,7 @@ def french_format(x: int) -> str: ) # Preparation de la figure barplot df_top10_dechets.reset_index(inplace=True) + # Création du graphique en barres avec Plotly Express fig5 = px.bar( @@ -733,6 +734,9 @@ def french_format(x: int) -> str: textfont_size=18, ) + # Paramétrage de l'infobulle + fig5.update_traces(hovertemplate="%{y} : %{x:.0f} déchets") + # Suppression de la colonne categorie del df_top10_dechets["Materiau"] @@ -800,10 +804,23 @@ def french_format(x: int) -> str: else: radius = 0.001 + # Format the value with commas as thousands separators + formatted_nb_dechet = locale.format_string( + "%.0f", row["nb_dechet"], grouping=True + ) + folium.CircleMarker( location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), radius=radius, # Utilisation du rayon ajusté - popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['DATE']} : {row['nb_dechet']} {selected_dechet}", + popup=folium.Popup( + html=f""" + Commune : {row['LIEU_VILLE']}
+ Zone : {row['NOM_ZONE']}
+ Quantité : {formatted_nb_dechet} {selected_dechet}
+ Date : {row['DATE']} + """, + max_width=150, + ), color="#3186cc", fill=True, fill_color="#3186cc", @@ -1091,6 +1108,12 @@ def french_format(x: int) -> str: showlegend=False, yaxis_title=None, ) + + # Paramétrage de l'infobulle + fig_secteur.update_traces( + hovertemplate="Secteur : %{y}
Quantité : %{x:.0f} déchets" + ) + with st.container(border=True): st.plotly_chart(fig_secteur, use_container_width=True) @@ -1144,6 +1167,10 @@ def french_format(x: int) -> str: uniformtext_mode="hide", yaxis_title=None, ) + # Paramétrage de l'infobulle + fig_marque.update_traces( + hovertemplate="Marque : %{y}
Quantité : %{x:.0f} déchets" + ) with st.container(border=True): st.plotly_chart(fig_marque, use_container_width=True) @@ -1201,10 +1228,10 @@ def french_format(x: int) -> str: margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 ) figreptree.update_traces( - textinfo="label+value", - texttemplate="%{label}
%{value:.0f} items", + textinfo="label+value+percent root", + texttemplate="%{label}
%{value:.0f} déchets
%{percentRoot} du total", textfont=dict(size=16), - hovertemplate="%{label}
Nombre de déchets : %{value:.0f}", + hovertemplate="%{label}
Quantité de déchets : %{value:.0f}", ) with st.container(border=True): From dc7a302e312d5f5cc76d403d228ed3461707eb8a Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Mon, 27 May 2024 16:45:21 +0200 Subject: [PATCH 123/147] =?UTF-8?q?[tg]=20-=20onglet=20mat=C3=A9riaux=20:?= =?UTF-8?q?=20tableau=20avec=20nb=20collectes=20par=20milieu?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 53 ++++++++++++++++++++++++++++++++---- 1 file changed, 48 insertions(+), 5 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 123a655..0cbbc60 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -113,6 +113,7 @@ def french_format(x: int) -> str: # Transformation du dataframe pour les graphiques # Variables à conserver en ligne cols_identifiers = [ + "ID_RELEVE", "ANNEE", "TYPE_MILIEU", "INSEE_COM", @@ -226,7 +227,10 @@ def french_format(x: int) -> str: f"Il n’y a pas de correspondance entre le poids et le volume global\ de déchets indiqués car certaines organisations \ ne renseignent que le volume sans mention de poids \ - (protocole de niveau 1) ou inversement. De plus, \ + (protocole de niveau 1) ou inversement." 
+ ) + st.caption( + f"De plus, \ les chiffres ci-dessous sont calculés sur **{french_format(nb_collectes_carac)}** ramassages \ ayant fait l’objet d’une estimation des volumes \ par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³.\ @@ -338,9 +342,10 @@ def french_format(x: int) -> str: # Grouper par année et type de matériau df_typemilieu = df_volume_cleaned.groupby( ["TYPE_MILIEU", "Matériau"], as_index=False - )["Volume"].sum() + ).agg({"Volume": "sum", "ID_RELEVE": "count"}) + df_typemilieu = df_typemilieu.sort_values( - ["TYPE_MILIEU", "Volume"], ascending=False + ["TYPE_MILIEU", "Volume"], ascending=True ) # Conversion litres en m3 df_typemilieu["Volume_m3"] = df_typemilieu["Volume"] / 1000 @@ -352,6 +357,10 @@ def french_format(x: int) -> str: } ) + # Ne pas faire apparaître la catégorie "Multi-lieux" + lignes_multi = df_typemilieu.loc[df_typemilieu["TYPE_MILIEU"] == "Multi-lieux"] + df_typemilieu.drop(lignes_multi.index, axis=0, inplace=True) + # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau fig3 = px.histogram( df_typemilieu, @@ -363,13 +372,16 @@ def french_format(x: int) -> str: color_discrete_map=colors_map, text_auto=True, ) - # Format d'affichage + # Format d'affichage + # traceorder : inverse l'ordre de la légende pour correspondre au graph fig3.update_layout( bargap=0.2, height=600, yaxis_title="Proportion du volume ramassé (en %)", xaxis_title=None, + legend={"traceorder": "reversed"}, ) + fig3.update_xaxes(tickangle=-30) # Etiquettes et formats de nombres fig3.update_traces( @@ -386,9 +398,40 @@ def french_format(x: int) -> str: ) # Afficher le graphique - with st.container(border=False): + with st.container(border=True): st.plotly_chart(fig3, use_container_width=True) + # Afficher un tableau du nombre de collectes par milieu en dessous + + df_nb_par_milieu = ( + df_typemilieu.groupby("TYPE_MILIEU", as_index=True) + .agg( + { + "ID_RELEVE": "sum", + "Volume": "sum", + } + ) 
+ .sort_values("TYPE_MILIEU", ascending=True) + ) + df_nb_par_milieu.rename( + { + "TYPE_MILIEU": "Milieu", + "ID_RELEVE": "Nombre de ramassages", + "Volume": "Volume en m3", + }, + axis=1, + inplace=True, + ) + + # Convertir en int pour éviter les virgules à l'affichage + df_nb_par_milieu = df_nb_par_milieu.astype("int") + + # Affichage du tableau + st.table(df_nb_par_milieu.T) + + # Affichage du graphique + st.plotly_chart(fig2, use_container_width=True) + # Ligne 3 : Graphe par milieu , lieu et année st.write("**Filtrer les données par année, type de milieu ou type de lieu**") From 34b099674a7c0d0d1750eda4daefd3761051ce1a Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Mon, 27 May 2024 17:10:22 +0200 Subject: [PATCH 124/147] =?UTF-8?q?[tg]=20-=20onglet=20mat=C3=A9riaux=20-?= =?UTF-8?q?=20am=C3=A9liorations=20table=20nb=20d=C3=A9chets?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 41 +++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 0cbbc60..6642dc5 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -72,6 +72,8 @@ def carac_exclusions(df): df_other["Exclusions"] = df_other.apply(lambda row: carac_exclusions(row), axis=1) + st.dataframe(df_other) + # Copier le df pour la partie filtrée par milieu/lieu/année df_other_metrics_raw = df_other.copy() @@ -154,6 +156,10 @@ def french_format(x: int) -> str: value_name="Volume", ) + # Nettoyage des lignes à 0 et conversion m3 + df_volume_cleaned = df_volume_cleaned[df_volume_cleaned["Volume"] != 0] + df_volume_cleaned["Volume_m3"] = df_volume_cleaned["Volume"] / 1000 + # Nettoyer le nom du Type déchet pour le rendre plus lisible df_volume_cleaned["Matériau"] = ( df_volume_cleaned["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() @@ -161,11 
+167,9 @@ def french_format(x: int) -> str: # Grouper par type de matériau pour les visualisations df_totals_sorted = df_volume_cleaned.groupby(["Matériau"], as_index=False)[ - "Volume" + "Volume_m3" ].sum() - df_totals_sorted = df_totals_sorted.sort_values(["Volume"], ascending=False) - # Conversion des volumes en m3. Conserver la colonne Volume initiale (Litres) - df_totals_sorted["Volume_m3"] = df_totals_sorted["Volume"] / 1000 + df_totals_sorted = df_totals_sorted.sort_values(["Volume_m3"], ascending=False) # replace "Verre" with "Verre/Céramique" in df_totals_sorted df_totals_sorted["Matériau"] = df_totals_sorted["Matériau"].replace( @@ -342,13 +346,11 @@ def french_format(x: int) -> str: # Grouper par année et type de matériau df_typemilieu = df_volume_cleaned.groupby( ["TYPE_MILIEU", "Matériau"], as_index=False - ).agg({"Volume": "sum", "ID_RELEVE": "count"}) + ).agg({"Volume_m3": "sum", "ID_RELEVE": "count"}) df_typemilieu = df_typemilieu.sort_values( - ["TYPE_MILIEU", "Volume"], ascending=True + ["TYPE_MILIEU", "Volume_m3"], ascending=True ) - # Conversion litres en m3 - df_typemilieu["Volume_m3"] = df_typemilieu["Volume"] / 1000 # Raccourcir les étiquettes trop longues df_typemilieu = df_typemilieu.replace( @@ -402,22 +404,25 @@ def french_format(x: int) -> str: st.plotly_chart(fig3, use_container_width=True) # Afficher un tableau du nombre de collectes par milieu en dessous - df_nb_par_milieu = ( - df_typemilieu.groupby("TYPE_MILIEU", as_index=True) + df_other.groupby("TYPE_MILIEU", as_index=True) .agg( { - "ID_RELEVE": "sum", - "Volume": "sum", + "ID_RELEVE": "count", } ) .sort_values("TYPE_MILIEU", ascending=True) ) + + # Ne pas faire apparaître la catégorie "Multi-lieux" + lignes_multi = df_nb_par_milieu.loc[df_nb_par_milieu.index == "Multi-lieux"] + df_nb_par_milieu.drop(lignes_multi.index, axis=0, inplace=True) + + # Renommage des colonnes pour l'affichage df_nb_par_milieu.rename( { "TYPE_MILIEU": "Milieu", - "ID_RELEVE": "Nombre de ramassages", - 
"Volume": "Volume en m3", + "ID_RELEVE": "", }, axis=1, inplace=True, @@ -427,10 +432,12 @@ def french_format(x: int) -> str: df_nb_par_milieu = df_nb_par_milieu.astype("int") # Affichage du tableau + st.write("Nombre de ramassages par milieu :") st.table(df_nb_par_milieu.T) - - # Affichage du graphique - st.plotly_chart(fig2, use_container_width=True) + st.caption( + f"Les ramassages catégorisés en 'Multi-lieux' " + + f"ont été retirés de l'analyse." + ) # Ligne 3 : Graphe par milieu , lieu et année st.write("**Filtrer les données par année, type de milieu ou type de lieu**") From 9f1b0e4e1bf5b0e9aa4abec9ca9ba39c0bbd7f00 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 29 May 2024 10:39:34 +0200 Subject: [PATCH 125/147] =?UTF-8?q?[tg]=20-=20s=C3=A9parateurs=20d=C3=A9ci?= =?UTF-8?q?maux=20et=20milliers=20dans=20les=20graphiques?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 118 ++++++++++++++++++++++------------- 1 file changed, 74 insertions(+), 44 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 6642dc5..ae3e6e8 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -72,8 +72,6 @@ def carac_exclusions(df): df_other["Exclusions"] = df_other.apply(lambda row: carac_exclusions(row), axis=1) - st.dataframe(df_other) - # Copier le df pour la partie filtrée par milieu/lieu/année df_other_metrics_raw = df_other.copy() @@ -165,7 +163,7 @@ def french_format(x: int) -> str: df_volume_cleaned["Matériau"].str.replace("GLOBAL_VOLUME_", "").str.title() ) - # Grouper par type de matériau pour les visualisations + ## Création du dataframe groupé par type de matériau pour les visualisations df_totals_sorted = df_volume_cleaned.groupby(["Matériau"], as_index=False)[ "Volume_m3" ].sum() @@ -260,13 +258,17 @@ def french_format(x: int) -> str: insidetextorientation="horizontal", 
rotation=90, ) - fig_data.update_layout(showlegend=False) + # Cacher la légende + fig_data.update_layout( + showlegend=False, + separators=", ", # Séparateurs décimales et milliers + ) st.plotly_chart(fig_data) # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux - with st.container(border=False): + with st.container(border=True): cell4, cell5 = st.columns(2) @@ -296,13 +298,13 @@ def french_format(x: int) -> str: + "
%{percent:.1%} du volume total" ) - fig.update_layout(autosize=True, legend_title_text="Matériau") - - # Définir affichage français pour les nombres - config = {"locale": "fr"} + # Définir titre légende et changer séparateurs des nombres pour affichage FR + fig.update_layout( + autosize=True, legend_title_text="Matériau", separators=", " + ) # Affichage du graphique - st.plotly_chart(fig, use_container_width=True, config=config) + st.plotly_chart(fig, use_container_width=True) with cell5: @@ -336,7 +338,11 @@ def french_format(x: int) -> str: yaxis_showgrid=False, xaxis_title=None, yaxis_title=None, + separators=", ", ) + fig2.update_xaxes( + tickfont=dict(size=14) + ) # Taille des étiquettes en abcisse # Affichage du graphique st.plotly_chart(fig2, use_container_width=True) @@ -382,9 +388,14 @@ def french_format(x: int) -> str: yaxis_title="Proportion du volume ramassé (en %)", xaxis_title=None, legend={"traceorder": "reversed"}, + separators=", ", ) - fig3.update_xaxes(tickangle=-30) + fig3.update_xaxes( + tickangle=-30, # ORientation des étiquettes de l'axe X + tickfont=dict(size=14), + ) # Taille des étiquettes en ordonnée + # Etiquettes et formats de nombres fig3.update_traces( texttemplate="%{y:.0f}%", @@ -432,7 +443,7 @@ def french_format(x: int) -> str: df_nb_par_milieu = df_nb_par_milieu.astype("int") # Affichage du tableau - st.write("Nombre de ramassages par milieu :") + st.write("**Nombre de ramassages par milieu**") st.table(df_nb_par_milieu.T) st.caption( f"Les ramassages catégorisés en 'Multi-lieux' " @@ -672,17 +683,20 @@ def french_format(x: int) -> str: color_discrete_map=colors_map, ) fig4.update_layout( - margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 + margin=dict(t=50, l=25, r=25, b=25), + autosize=True, + height=600, + separators=", ", # Séparateurs décimales et milliers ) fig4.update_traces( textinfo="label+value+percent root", - texttemplate="%{label}
%{value:.1f} m³
%{percentRoot}", + texttemplate="%{label}
%{value:.0f} m³
%{percentRoot}", textfont_size=16, hovertemplate="%{label} : %{value:.1f} m³ " + "
%{percentRoot:.1%} du volume total", ) - with st.container(border=False): + with st.container(border=True): st.plotly_chart(fig4, use_container_width=True) else: @@ -765,7 +779,7 @@ def french_format(x: int) -> str: showlegend=True, height=700, uniformtext_minsize=8, - uniformtext_mode="hide", + uniformtext_mode="show", yaxis_title=None, # Position de la légende legend=dict( @@ -774,18 +788,22 @@ def french_format(x: int) -> str: xanchor="right", x=0.95, ), + separators=", ", ) # Amélioration du visuel du graphique fig5.update_traces( - # texttemplate="%{text:.2f}", + texttemplate="%{text:,.0f}", textposition="inside", textfont_color="white", - textfont_size=18, + textfont_size=14, ) - # Paramétrage de l'infobulle - fig5.update_traces(hovertemplate="%{y} : %{x:.0f} déchets") + fig5.update_yaxes(tickfont=dict(size=14)) # Taille des étiquettes en abcisse + + fig5.update_traces( + hovertemplate="%{y} : %{x:,.0f} déchets" + ) # Template de l'infobulle # Suppression de la colonne categorie del df_top10_dechets["Materiau"] @@ -800,7 +818,9 @@ def french_format(x: int) -> str: par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³." ) - with st.container(): + with st.container(border=True): + + st.write("**Lieux de ramassage des déchets dans le top 10**") # Ajout de la selectbox selected_dechet = st.selectbox( "Choisir un type de déchet :", noms_top10_dechets, index=0 @@ -1148,7 +1168,7 @@ def french_format(x: int) -> str: # add log scale to x axis fig_secteur.update_layout(xaxis_type="log") fig_secteur.update_traces( - texttemplate="%{value:.0f}", + texttemplate="%{value:,.0f}", textposition="inside", textfont_size=14, ) @@ -1157,11 +1177,15 @@ def french_format(x: int) -> str: uniformtext_mode="hide", showlegend=False, yaxis_title=None, + separators=", ", ) + fig_secteur.update_yaxes( + tickfont=dict(size=14) + ) # Taille des étiquettes en ordonnée # Paramétrage de l'infobulle fig_secteur.update_traces( - hovertemplate="Secteur : %{y}
Quantité : %{x:.0f} déchets" + hovertemplate="Secteur : %{y}
Quantité : %{x:,.0f} déchets" ) with st.container(border=True): @@ -1193,34 +1217,35 @@ def french_format(x: int) -> str: french_format(nb_marques) + " marques", ) + # Configuration du graphique à barres fig_marque = px.bar( top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), x="Nombre de déchets", y="Marque", title="Top 10 des marques identifiées dans les déchets comptés", - labels={ - "Nombre de déchets": "Nombre total de déchets (échelle logarithmique)", - }, color_discrete_sequence=["#1951A0"], orientation="h", - text_auto=False, - text=top_marque_df.tail(10)["Marque"] - + " : " - + top_marque_df.tail(10)["Nombre de déchets"].astype(str), + text_auto=True, ) + # add log scale to x axis - fig_marque.update_layout(xaxis_type="log") - fig_marque.update_traces(textfont_size=14) fig_marque.update_layout( + # xaxis_type="log", # Pas besoin d'échelle log ici height=700, uniformtext_minsize=8, uniformtext_mode="hide", yaxis_title=None, + separators=", ", ) - # Paramétrage de l'infobulle + # Paramétrage de la taille de police et de l'infobulle fig_marque.update_traces( - hovertemplate="Marque : %{y}
Quantité : %{x:.0f} déchets" + textfont_size=14, + texttemplate="%{value:,.0f}", + hovertemplate="Marque : %{y}
Quantité : %{x:,.0f} déchets", ) + fig_marque.update_yaxes( + tickfont=dict(size=14) + ) # Taille des étiquettes en ordonnée with st.container(border=True): st.plotly_chart(fig_marque, use_container_width=True) @@ -1275,25 +1300,30 @@ def french_format(x: int) -> str: color_discrete_sequence=px.colors.qualitative.Set2, ) figreptree.update_layout( - margin=dict(t=50, l=25, r=25, b=25), autosize=True, height=600 + margin=dict(t=50, l=25, r=25, b=25), + autosize=True, + height=600, + separators=", ", ) figreptree.update_traces( textinfo="label+value+percent root", - texttemplate="%{label}
%{value:.0f} déchets
%{percentRoot} du total", + texttemplate="%{label}
%{value:,.0f} déchets
%{percentRoot} du total", textfont=dict(size=16), - hovertemplate="%{label}
Quantité de déchets : %{value:.0f}", + hovertemplate="%{label}
" + + "Quantité de déchets : %{value:,.0f}
" + + "Part du total ramassé : %{percentRoot:.1%}", ) with st.container(border=True): st.plotly_chart(figreptree, use_container_width=True) - # Message d'avertissement Nombre de déchets dont la REP n'a pas été determine - if nb_vide_rep != 0: - st.warning( - "⚠️ Il y a " - + str(french_format(nb_vide_rep)) - + " déchets dont la filière REP n'a pas été determinée dans les déchets collectés." - ) + # Message d'avertissement Nombre de déchets dont la REP n'a pas été determine + if nb_vide_rep != 0: + st.warning( + "⚠️ Il y a " + + str(french_format(nb_vide_rep)) + + " déchets dont la filière REP n'a pas été determinée dans les déchets collectés." + ) else: From 0835d038a34e976e8f94d64f27f763b6c750b757 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 29 May 2024 10:59:05 +0200 Subject: [PATCH 126/147] =?UTF-8?q?[tg]=20-=20sous-onglet=20mat=C3=A9riaux?= =?UTF-8?q?=20:=20filtrer=20sur=20milieux=20avec=20plus=20de=203=20collect?= =?UTF-8?q?es?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 57 +++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index ae3e6e8..798dfac 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -72,6 +72,13 @@ def carac_exclusions(df): df_other["Exclusions"] = df_other.apply(lambda row: carac_exclusions(row), axis=1) + # Raccourcir les étiquettes de milieux trop longues + df_other = df_other.replace( + { + "Zone naturelle ou rurale (hors littoral et montagne)": "Zone naturelle ou rurale" + } + ) + # Copier le df pour la partie filtrée par milieu/lieu/année df_other_metrics_raw = df_other.copy() @@ -79,14 +86,14 @@ def carac_exclusions(df): def french_format(x: int) -> str: if x > 1e9: y = x / 1e9 - y = locale.format("%.2f", y, grouping=True) + y = locale.format_string("%.2f", y, 
grouping=True) return f"{y} milliards" if x > 1e6: y = x / 1e6 - y = locale.format("%.2f", y, grouping=True) + y = locale.format_string("%.2f", y, grouping=True) return f"{y} millions" else: - y = locale.format("%.0f", x, grouping=True) + y = locale.format_string("%.0f", x, grouping=True) return f"{y}" # 3 Onglets : Matériaux, Top déchets, Filières et marques @@ -347,9 +354,25 @@ def french_format(x: int) -> str: # Affichage du graphique st.plotly_chart(fig2, use_container_width=True) - # Ligne 3 : Graphe par milieu de collecte + ### GRAPHIQUE PAR MILIEU DE COLLECTE + + # Calcul du nombre de collectes par milieu + df_nb_par_milieu = ( + df_other.groupby("TYPE_MILIEU", as_index=True) + .agg( + { + "ID_RELEVE": "count", + } + ) + .sort_values("TYPE_MILIEU", ascending=True) + ) + # Exclure les milieux avec moins de 3 collectes + milieux_a_exclure = df_nb_par_milieu[ + df_nb_par_milieu["ID_RELEVE"] <= 3 + ].index.to_list() + df_nb_par_milieu = df_nb_par_milieu.drop(milieux_a_exclure, axis=0) - # Grouper par année et type de matériau + # Calcul du dataframe groupé par milieu et matériau pour le graphique df_typemilieu = df_volume_cleaned.groupby( ["TYPE_MILIEU", "Matériau"], as_index=False ).agg({"Volume_m3": "sum", "ID_RELEVE": "count"}) @@ -358,12 +381,10 @@ def french_format(x: int) -> str: ["TYPE_MILIEU", "Volume_m3"], ascending=True ) - # Raccourcir les étiquettes trop longues - df_typemilieu = df_typemilieu.replace( - { - "Zone naturelle ou rurale (hors littoral et montagne)": "Zone naturelle ou rurale" - } - ) + # Retirer milieux avec moins de 3 collectes + df_typemilieu = df_typemilieu[ + ~df_typemilieu["TYPE_MILIEU"].isin(milieux_a_exclure) + ] # Ne pas faire apparaître la catégorie "Multi-lieux" lignes_multi = df_typemilieu.loc[df_typemilieu["TYPE_MILIEU"] == "Multi-lieux"] @@ -414,17 +435,6 @@ def french_format(x: int) -> str: with st.container(border=True): st.plotly_chart(fig3, use_container_width=True) - # Afficher un tableau du nombre de collectes par 
milieu en dessous - df_nb_par_milieu = ( - df_other.groupby("TYPE_MILIEU", as_index=True) - .agg( - { - "ID_RELEVE": "count", - } - ) - .sort_values("TYPE_MILIEU", ascending=True) - ) - # Ne pas faire apparaître la catégorie "Multi-lieux" lignes_multi = df_nb_par_milieu.loc[df_nb_par_milieu.index == "Multi-lieux"] df_nb_par_milieu.drop(lignes_multi.index, axis=0, inplace=True) @@ -447,7 +457,8 @@ def french_format(x: int) -> str: st.table(df_nb_par_milieu.T) st.caption( f"Les ramassages catégorisés en 'Multi-lieux' " - + f"ont été retirés de l'analyse." + + f"ont été retirés de l'analyse. " + + f"Les milieux représentant moins de 3 ramassages ne sont pas affichés." ) # Ligne 3 : Graphe par milieu , lieu et année From 9225aea9819cbb6f0676833593ba02b605bb832e Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 29 May 2024 12:32:43 +0200 Subject: [PATCH 127/147] =?UTF-8?q?[tg]=20-=20ajout=20des=20filtres=20sur?= =?UTF-8?q?=20top10=20d=C3=A9chets?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 182 ++++++++++++++++++++++++++--------- 1 file changed, 136 insertions(+), 46 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 798dfac..93fa8d4 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -80,7 +80,7 @@ def carac_exclusions(df): ) # Copier le df pour la partie filtrée par milieu/lieu/année - df_other_metrics_raw = df_other.copy() + # df_other_metrics_raw = df_other.copy() # Fonction pour améliorer l'affichage des nombres (milliers, millions, milliards) def french_format(x: int) -> str: @@ -466,8 +466,8 @@ def french_format(x: int) -> str: # Étape 1: Création des filtres - df_other_metrics = df_other_metrics_raw.copy() - df_other_metrics = df_other_metrics.fillna(0) + # df_other_metrics = df_other_metrics_raw.copy() + # df_other_metrics = df_other_metrics.fillna(0) with 
st.expander("Filtrer par année, type milieu ou type de lieu"): @@ -483,12 +483,12 @@ def french_format(x: int) -> str: filtered_data_milieu = df_other[ df_other["ANNEE"] == selected_annee ].copy() - filtered_metrics_milieu = df_other_metrics[ - df_other_metrics["ANNEE"] == selected_annee - ].copy() + # filtered_metrics_milieu = df_other_metrics[ + # df_other_metrics["ANNEE"] == selected_annee + # ].copy() else: filtered_data_milieu = df_other.copy() - filtered_metrics_milieu = df_other_metrics.copy() + # filtered_metrics_milieu = df_other_metrics.copy() ## Filtre par milieu # Initialiser le champ déroulant avec une valeur par défaut @@ -506,12 +506,12 @@ def french_format(x: int) -> str: filtered_data_lieu = filtered_data_milieu[ filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu ] - filtered_metrics_milieu = filtered_metrics_milieu[ - filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu - ] + # filtered_metrics_milieu = filtered_metrics_milieu[ + # filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu + # ] else: filtered_data_lieu = filtered_data_milieu.copy() - filtered_metrics_milieu = df_other_metrics.copy() + # filtered_metrics_milieu = df_other_metrics.copy() # Filtre par type de lieu @@ -531,15 +531,15 @@ def french_format(x: int) -> str: and selected_type_lieu == valeur_par_defaut_lieu ): df_filtered = df_other.copy() - df_filtered_metrics = df_other_metrics_raw.copy() + # df_filtered_metrics = df_other_metrics_raw.copy() elif ( selected_type_milieu == valeur_par_defaut_milieu and selected_type_lieu == valeur_par_defaut_lieu ): df_filtered = df_other[df_other["ANNEE"] == selected_annee].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["ANNEE"] == selected_annee - ].copy() + # df_filtered_metrics = df_other_metrics_raw[ + # df_other_metrics["ANNEE"] == selected_annee + # ].copy() elif ( selected_annee == valeur_par_defaut_annee and selected_type_lieu == valeur_par_defaut_lieu @@ -548,9 +548,9 @@ def 
french_format(x: int) -> str: df_filtered = df_other[ df_other["TYPE_MILIEU"] == selected_type_milieu ].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["TYPE_MILIEU"] == selected_type_milieu - ].copy() + # df_filtered_metrics = df_other_metrics_raw[ + # df_other_metrics["TYPE_MILIEU"] == selected_type_milieu + # ].copy() elif ( selected_annee == valeur_par_defaut_annee @@ -558,9 +558,9 @@ def french_format(x: int) -> str: and selected_type_milieu == valeur_par_defaut_milieu ): df_filtered = df_other[df_other["TYPE_LIEU"] == selected_type_lieu].copy() - df_filtered_metrics = df_other_metrics_raw[ - df_other_metrics["TYPE_LIEU"] == selected_type_lieu - ].copy() + # df_filtered_metrics = df_other_metrics_raw[ + # df_other_metrics["TYPE_LIEU"] == selected_type_lieu + # ].copy() elif ( selected_annee == valeur_par_defaut_annee @@ -571,10 +571,10 @@ def french_format(x: int) -> str: (df_other["TYPE_LIEU"] == selected_type_lieu) & (df_other["TYPE_MILIEU"] == selected_type_milieu) ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - ] + # df_filtered_metrics = df_other_metrics_raw[ + # (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + # & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + # ] elif ( selected_annee != valeur_par_defaut_annee and selected_type_lieu != valeur_par_defaut_lieu @@ -584,10 +584,10 @@ def french_format(x: int) -> str: (df_other["ANNEE"] == selected_annee) & (df_other["TYPE_LIEU"] == selected_type_lieu) ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - ] + # df_filtered_metrics = df_other_metrics_raw[ + # (df_other_metrics["ANNEE"] == selected_annee) + # & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + # ] elif ( selected_annee != valeur_par_defaut_annee and 
selected_type_lieu == valeur_par_defaut_lieu @@ -597,10 +597,10 @@ def french_format(x: int) -> str: (df_other["ANNEE"] == selected_annee) & (df_other["TYPE_MILIEU"] == selected_type_milieu) ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - ] + # df_filtered_metrics = df_other_metrics_raw[ + # (df_other_metrics["ANNEE"] == selected_annee) + # & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + # ] else: df_filtered = df_other[ @@ -608,11 +608,11 @@ def french_format(x: int) -> str: & (df_other["TYPE_MILIEU"] == selected_type_milieu) & (df_other["TYPE_LIEU"] == selected_type_lieu) ].copy() - df_filtered_metrics = df_other_metrics_raw[ - (df_other_metrics["ANNEE"] == selected_annee) - & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - ] + # df_filtered_metrics = df_other_metrics_raw[ + # (df_other_metrics["ANNEE"] == selected_annee) + # & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) + # & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) + # ] # Ligne 5 : Metriques filtrés l5_col1, l5_col2, l5_col3 = st.columns(3) @@ -620,9 +620,9 @@ def french_format(x: int) -> str: cell7 = l5_col2.container(border=True) cell8 = l5_col3.container(border=True) - poids_total_filtered = df_filtered_metrics["POIDS_TOTAL"].sum() + poids_total_filtered = df_filtered["POIDS_TOTAL"].sum() # Volume litres converti en m3 - volume_total_filtered_m3 = df_filtered_metrics["VOLUME_TOTAL"].sum() / 1000 + volume_total_filtered_m3 = df_filtered["VOLUME_TOTAL"].sum() / 1000 cell6.metric( "Volume de déchets collectés", @@ -720,11 +720,103 @@ def french_format(x: int) -> str: df_top = df_nb_dechet.copy() df_top_data_releves = df_other.copy() + filtered_df = df_other.copy() # Initialiser le df sans filtres + + # Filtres + with st.expander("Filtrer par année, type milieu ou type de lieu"): + + # 
Définir les options + annee_options = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) + options_annee = [valeur_par_defaut_annee] + annee_options + options_milieux = [valeur_par_defaut_milieu] + sorted( + list(df_other["TYPE_MILIEU"].unique()) + ) + options_lieux = [valeur_par_defaut_lieu] + sorted( + list(df_other["TYPE_LIEU"].unique()) + ) + + annee = st.selectbox( + "Choisir une année :", + options=options_annee, + index=options_annee.index(valeur_par_defaut_annee), # Définir l'index + key="topdechets_annee", # définir key pour éviter conflits + ) + + milieu = st.selectbox( + "Choisir un type de milieu :", + options=options_milieux, + index=options_milieux.index( + valeur_par_defaut_milieu + ), # Définir l'index + key="topdechets_milieu", # définir key pour éviter conflits + ) + + # Mise à jour dynamique des filtres + if milieu != valeur_par_defaut_milieu: + options_lieux = [valeur_par_defaut_lieu] + list( + milieu_lieu_dict[milieu] + ) + + lieu = st.selectbox( + "Choisir un type de lieu :", + options=options_lieux, + index=options_lieux.index(valeur_par_defaut_lieu), # Définir l'index + key="topdechets_lieu", # définir key pour éviter conflits + ) + + # Conditions pour filtrer les valeurs et ne pas considérer la valeur par défaut dans le filtre + if annee == valeur_par_defaut_annee: # Aucun filtre annee + if milieu == valeur_par_defaut_milieu: # Aucun filtre milieu + if lieu == valeur_par_defaut_lieu: # Aucun filtre lieu + pass # Pas de filtre + else: # Si lieu choisi + filtered_df = filtered_df[(filtered_df["TYPE_LIEU"] == lieu)] + else: # Si milieu choisi + if lieu == valeur_par_defaut_lieu: # Aucun filtre lieu + filtered_df = filtered_df[(filtered_df["TYPE_MILIEU"] == milieu)] + else: # Si milieu ET lieu choisi + filtered_df = filtered_df[ + (filtered_df["TYPE_MILIEU"] == milieu) + & (filtered_df["TYPE_LIEU"] == lieu) + ] + else: # Si annee a été choisie + if milieu == valeur_par_defaut_milieu: # Aucun filtre milieu + if lieu == 
valeur_par_defaut_lieu: # Aucun filtre lieu + filtered_df = filtered_df[ + (filtered_df["ANNEE"] == annee) + ] # Filtre annee uniquement + else: # Si lieu choisi + filtered_df = filtered_df[ + (filtered_df["ANNEE"] == annee) + & (filtered_df["TYPE_LIEU"] == lieu) + ] + else: # Si milieu choisi + if lieu == valeur_par_defaut_lieu: # Aucun filtre lieu + filtered_df = filtered_df[ + (filtered_df["ANNEE"] == annee) + & (filtered_df["TYPE_MILIEU"] == milieu) + ] + else: # Si milieu ET lieu choisi : 3 filtres + filtered_df = filtered_df[ + (filtered_df["ANNEE"] == annee) + & (filtered_df["TYPE_MILIEU"] == milieu) + & (filtered_df["TYPE_LIEU"] == lieu) + ] + + # Récupérer les index de collectes pour filtrer le dataframe nb_dechets + # Filtrer les données sur les ID_RELEVES + df_top10 = pd.merge(df_top, filtered_df, on="ID_RELEVE", how="inner") + + # Retrait des lignes avec 100% de volume catégorisé en AUTRE + df_top10 = df_top10[df_top10["Exclusions"] == "Inclus"] + # Calcul du nombre total de déchets catégorisés sur le territoier - nb_total_dechets = df_top[(df_top["type_regroupement"] == "GROUPE")][ + nb_total_dechets = df_top10[(df_top10["type_regroupement"] == "GROUPE")][ "nb_dechet" ].sum() + nb_collec_top = df_top10["ID_RELEVE"].nunique() + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2 = st.columns(2) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) @@ -736,7 +828,7 @@ def french_format(x: int) -> str: # 3ème métrique : nombre de relevés cell2 = l1_col2.container(border=True) - cell2.metric("Nombre de ramassages", french_format(nb_collectes_int)) + cell2.metric("Nombre de ramassages", french_format(nb_collec_top)) # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int <= 5: @@ -748,8 +840,6 @@ def french_format(x: int) -> str: # Ligne 2 : graphique top déchets - # Filtration des données pour nb_dechets - df_top10 = pd.merge(df_top, 
df_top_data_releves, on="ID_RELEVE", how="inner") # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] # Group by 'categorie', sum 'nb_dechet', et top 10 @@ -824,9 +914,9 @@ def french_format(x: int) -> str: st.write("") st.caption( - f"Note : Les chiffres ci-dessous sont calculés sur XX ramassages \ + f"Note : Les chiffres ci-dessous sont calculés sur {nb_collec_top} ramassages \ ayant fait l’objet d’une estimation des volumes \ - par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³." + par matériau." ) with st.container(border=True): From 3f2472981015d649fb7aec9430779893789c169c Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 29 May 2024 12:46:56 +0200 Subject: [PATCH 128/147] [tg] inversion graphs marques et REP --- dashboards/app/pages/data.py | 145 +++++++++++++++++++---------------- 1 file changed, 77 insertions(+), 68 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 93fa8d4..91a6b6e 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -1172,7 +1172,8 @@ def french_format(x: int) -> str: nb_dechet_rep = rep_df["nb_dechet"].sum() nb_rep = len(top_rep_df["Responsabilité élargie producteur"].unique()) - # Metriques et graphs secteurs + ### ANALYSE PAR SECTEUR + st.write("**Analyse par secteur économique**") # Retrait des categoriés "VIDE" et "INDERTERMINE" si présentes et recupération des valeurs nb_vide_indetermine = 0 if "VIDE" in top_secteur_df["Secteur"].unique(): @@ -1292,76 +1293,20 @@ def french_format(x: int) -> str: with st.container(border=True): st.plotly_chart(fig_secteur, use_container_width=True) - # Message d'avertissement Nombre de dechets dont le secteur n'a pas été determine - if nb_vide_indetermine != 0: - st.warning( - "⚠️ Il y a " - + str(french_format(nb_vide_indetermine)) - + " 
déchets dont le secteur n'a pas été determiné dans les déchets collectés." - ) - - # Metriques et graphes marques - l2_col1, l2_col2 = st.columns(2) - cell4 = l2_col1.container(border=True) - - # 1er métrique : nombre de dechets categorises par marques - - cell4.metric( - "Nombre de déchets triés par marque", - french_format(nb_dechet_marque) + " déchets", - ) - - # 2ème métrique : nombre de marques identifiées lors des collectes - cell5 = l2_col2.container(border=True) - cell5.metric( - "Nombre de marques concernées", - french_format(nb_marques) + " marques", - ) + # Message d'avertissement Nombre de dechets dont le secteur n'a pas été determine + if nb_vide_indetermine != 0: + st.warning( + "⚠️ Il y a " + + str(french_format(nb_vide_indetermine)) + + " déchets dont le secteur n'a pas été determiné dans les déchets collectés." + ) - # Configuration du graphique à barres - fig_marque = px.bar( - top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), - x="Nombre de déchets", - y="Marque", - title="Top 10 des marques identifiées dans les déchets comptés", - color_discrete_sequence=["#1951A0"], - orientation="h", - text_auto=True, - ) + ### ANALYSE PAR FILIERE REP - # add log scale to x axis - fig_marque.update_layout( - # xaxis_type="log", # Pas besoin d'échelle log ici - height=700, - uniformtext_minsize=8, - uniformtext_mode="hide", - yaxis_title=None, - separators=", ", - ) - # Paramétrage de la taille de police et de l'infobulle - fig_marque.update_traces( - textfont_size=14, - texttemplate="%{value:,.0f}", - hovertemplate="Marque : %{y}
Quantité : %{x:,.0f} déchets", + st.write( + "**Analyse par filière de RResponsabilité Élargie du Producteur (REP)**" ) - fig_marque.update_yaxes( - tickfont=dict(size=14) - ) # Taille des étiquettes en ordonnée - - with st.container(border=True): - st.plotly_chart(fig_marque, use_container_width=True) - with st.container(border=True): - st.caption( - "La Responsabilité Élargie du Producteur (REP) est une obligation qui impose aux entreprises de payer une contribution financière" - + " pour la prise en charge de la gestion des déchets issus des produits qu’ils mettent sur le marché selon le principe pollueur-payeur." - + " Pour ce faire, elles doivent contribuer financièrement à la collecte, du tri et au recyclage de ces produits, " - + "généralement à travers les éco-organismes privés, agréés par l’Etat, comme CITEO pour les emballages. " - + "L’État a depuis 1993 progressivement mis en place 25 filières REP, qui regroupent de grandes familles de produits " - + "(emballages ménagers, tabac, textile, ameublement, …)." 
- ) - - # Metriques et graphes Responsabilité elargie producteurs l3_col1, l3_col2 = st.columns(2) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) # Suppression de la catégorie "VIDE" @@ -1380,7 +1325,7 @@ def french_format(x: int) -> str: # 1ère métrique : nombre de dechets catégorisés repartis par responsabilités cell6 = l3_col1.container(border=True) cell6.metric( - "Quan de déchets catégorisés par filière REP", + "Quantité de déchets catégorisés", french_format(nb_dechet_rep), ) @@ -1391,6 +1336,16 @@ def french_format(x: int) -> str: french_format(nb_rep) + " filières", ) + with st.expander("Qu'est-ce que la Responsabilité Élargie du Producteur ?"): + st.write( + "La Responsabilité Élargie du Producteur (REP) est une obligation qui impose aux entreprises de payer une contribution financière" + + " pour la prise en charge de la gestion des déchets issus des produits qu’ils mettent sur le marché selon le principe pollueur-payeur." + + " Pour ce faire, elles doivent contribuer financièrement à la collecte, du tri et au recyclage de ces produits, " + + "généralement à travers les éco-organismes privés, agréés par l’Etat, comme CITEO pour les emballages. " + + "L’État a depuis 1993 progressivement mis en place 25 filières REP, qui regroupent de grandes familles de produits " + + "(emballages ménagers, tabac, textile, ameublement, …)." + ) + # Treemap REP figreptree = px.treemap( top_rep_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), @@ -1426,6 +1381,60 @@ def french_format(x: int) -> str: + " déchets dont la filière REP n'a pas été determinée dans les déchets collectés." 
) + ### ANALYSES PAR MARQUE + + st.write("**Analyse par marque**") + + l2_col1, l2_col2 = st.columns(2) + cell4 = l2_col1.container(border=True) + + # 1er métrique : nombre de dechets categorises par marques + + cell4.metric( + "Nombre de déchets triés par marque", + french_format(nb_dechet_marque) + " déchets", + ) + + # 2ème métrique : nombre de marques identifiées lors des collectes + cell5 = l2_col2.container(border=True) + cell5.metric( + "Nombre de marques concernées", + french_format(nb_marques) + " marques", + ) + + # Configuration du graphique à barres + fig_marque = px.bar( + top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), + x="Nombre de déchets", + y="Marque", + title="Top 10 des marques identifiées dans les déchets comptés", + color_discrete_sequence=["#1951A0"], + orientation="h", + text_auto=True, + ) + + # add log scale to x axis + fig_marque.update_layout( + # xaxis_type="log", # Pas besoin d'échelle log ici + height=700, + uniformtext_minsize=8, + uniformtext_mode="hide", + yaxis_title=None, + separators=", ", + ) + # Paramétrage de la taille de police et de l'infobulle + fig_marque.update_traces( + textfont_size=14, + texttemplate="%{value:,.0f}", + hovertemplate="Marque : %{y}
Quantité : %{x:,.0f} déchets", + ) + fig_marque.update_yaxes( + tickfont=dict(size=14) + ) # Taille des étiquettes en ordonnée + + with st.container(border=True): + st.plotly_chart(fig_marque, use_container_width=True) + else: st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") From 5334ee7923458a95f4cf51a525d1b92f01aca204 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 29 May 2024 15:30:50 +0200 Subject: [PATCH 129/147] =?UTF-8?q?[tg]=20-=20sous-onglet=203=20:=20filtre?= =?UTF-8?q?=20sur=20niveaux=204=20(secteurs/REP)=20et=202=C3=A04=20(marque?= =?UTF-8?q?s)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 97 ++++++++++++++++++++++++------------ 1 file changed, 66 insertions(+), 31 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 91a6b6e..d2c5e8a 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -1005,7 +1005,6 @@ def french_format(x: int) -> str: ) # Onglet 3 : Secteurs et marques with tab3: - st.write("") # Préparation des données df_dechet_copy = df_nb_dechet.copy() @@ -1126,11 +1125,17 @@ def french_format(x: int) -> str: & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) ].copy() + # + # Filtration des données pour nb_dechets df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") # Data pour le plot secteur secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] + secteur_df = secteur_df[ + secteur_df["NIVEAU_CARAC"] == 4 + ] # Filtre sur relevés de niveau 4 + top_secteur_df = ( secteur_df.groupby("categorie")["nb_dechet"] .sum() @@ -1142,8 +1147,23 @@ def french_format(x: int) -> str: "Nombre de déchets" ].astype(int) + # Data pour le plot responsabilités + rep_df = df_init[df_init["type_regroupement"].isin(["REP"])] + rep_df = rep_df[rep_df["NIVEAU_CARAC"] == 4] # Filtre sur relevés de niveau 4 
+ + top_rep_df = ( + rep_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) + ) + top_rep_df = top_rep_df.reset_index() + top_rep_df.columns = ["Responsabilité élargie producteur", "Nombre de déchets"] + # Data pour le plot marque + marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] + marque_df = marque_df[ + marque_df["NIVEAU_CARAC"] >= 2 + ] # Filtre sur relevés de niveau 2, 3 et 4 + top_marque_df = ( marque_df.groupby("categorie")["nb_dechet"] .sum() @@ -1155,25 +1175,19 @@ def french_format(x: int) -> str: int ) - # Data pour le plot responsabilités - rep_df = df_init[df_init["type_regroupement"].isin(["REP"])] - top_rep_df = ( - rep_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) - ) - top_rep_df = top_rep_df.reset_index() - top_rep_df.columns = ["Responsabilité élargie producteur", "Nombre de déchets"] - # Chiffres clés nb_dechet_secteur = secteur_df["nb_dechet"].sum() nb_secteurs = len(top_secteur_df["Secteur"].unique()) nb_dechet_marque = marque_df["nb_dechet"].sum() nb_marques = len(top_marque_df["Marque"].unique()) - collectes = len(df_filtered) + collectes_sect = secteur_df["ID_RELEVE"].nunique() + collectes_rep = rep_df["ID_RELEVE"].nunique() + collectes_marque = marque_df["ID_RELEVE"].nunique() nb_dechet_rep = rep_df["nb_dechet"].sum() nb_rep = len(top_rep_df["Responsabilité élargie producteur"].unique()) ### ANALYSE PAR SECTEUR - st.write("**Analyse par secteur économique**") + st.write("**Analyse par secteur économique** (relevés de niveau 4 uniquement)") # Retrait des categoriés "VIDE" et "INDERTERMINE" si présentes et recupération des valeurs nb_vide_indetermine = 0 if "VIDE" in top_secteur_df["Secteur"].unique(): @@ -1198,28 +1212,28 @@ def french_format(x: int) -> str: # Trick pour séparer les milliers cell1.metric( - "Nombre de déchets triés par secteur", french_format(nb_dechet_secteur) + "Quantité de déchets catégorisés", french_format(nb_dechet_secteur) ) # 2ème métrique : poids cell2 
= l1_col2.container(border=True) cell2.metric( "Nombre de secteurs concernés", - french_format(nb_secteurs) + " secteurs", + french_format(nb_secteurs), ) # 3ème métrique : nombre de collectes cell3 = l1_col3.container(border=True) cell3.metric( "Nombre de ramassages", - french_format(collectes), + french_format(collectes_sect), ) # Message d'avertissement nb de collectes en dessous de 5 - if collectes <= 5: + if collectes_sect <= 5: st.warning( "⚠️ Faible nombre de ramassages (" - + str(collectes) + + str(collectes_sect) + ") dans la base de données." ) @@ -1295,19 +1309,19 @@ def french_format(x: int) -> str: # Message d'avertissement Nombre de dechets dont le secteur n'a pas été determine if nb_vide_indetermine != 0: - st.warning( - "⚠️ Il y a " + st.caption( + "Note : cette analyse exclut " + str(french_format(nb_vide_indetermine)) - + " déchets dont le secteur n'a pas été determiné dans les déchets collectés." + + " déchets dont le secteur n'a pas pu être determiné." ) ### ANALYSE PAR FILIERE REP st.write( - "**Analyse par filière de RResponsabilité Élargie du Producteur (REP)**" + "**Analyse par filière de Responsabilité Élargie du Producteur** (relevés de niveau 4 uniquement)" ) - l3_col1, l3_col2 = st.columns(2) + l3_col1, l3_col2, l3_col3 = st.columns(3) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) # Suppression de la catégorie "VIDE" nb_vide_rep = 0 @@ -1333,7 +1347,13 @@ def french_format(x: int) -> str: cell7 = l3_col2.container(border=True) cell7.metric( "Nombre de filières REP identifiées", - french_format(nb_rep) + " filières", + french_format(nb_rep), + ) + + cell8 = l3_col3.container(border=True) # Nb de collectes + cell8.metric( + "Nombre de ramassages", + french_format(collectes_rep), ) with st.expander("Qu'est-ce que la Responsabilité Élargie du Producteur ?"): @@ -1358,7 +1378,7 @@ def french_format(x: int) -> str: figreptree.update_layout( margin=dict(t=50, l=25, 
r=25, b=25), autosize=True, - height=600, + height=500, separators=", ", ) figreptree.update_traces( @@ -1375,31 +1395,37 @@ def french_format(x: int) -> str: # Message d'avertissement Nombre de déchets dont la REP n'a pas été determine if nb_vide_rep != 0: - st.warning( - "⚠️ Il y a " + st.caption( + "Note : Cette analyse exclut " + str(french_format(nb_vide_rep)) - + " déchets dont la filière REP n'a pas été determinée dans les déchets collectés." + + " déchets dont la filière REP n'a pas pu être determinée." ) ### ANALYSES PAR MARQUE - st.write("**Analyse par marque**") + st.write("**Analyse par marque** (relevés de niveaux 2 à 4)") - l2_col1, l2_col2 = st.columns(2) + l2_col1, l2_col2, l2_col3 = st.columns(3) cell4 = l2_col1.container(border=True) # 1er métrique : nombre de dechets categorises par marques cell4.metric( - "Nombre de déchets triés par marque", - french_format(nb_dechet_marque) + " déchets", + "Quantité de déchets catégorisés", + french_format(nb_dechet_marque), ) # 2ème métrique : nombre de marques identifiées lors des collectes cell5 = l2_col2.container(border=True) cell5.metric( "Nombre de marques concernées", - french_format(nb_marques) + " marques", + french_format(nb_marques), + ) + + cell12 = l2_col3.container(border=True) # Nb de collectes + cell12.metric( + "Nombre de ramassages", + french_format(collectes_marque), ) # Configuration du graphique à barres @@ -1435,6 +1461,15 @@ def french_format(x: int) -> str: with st.container(border=True): st.plotly_chart(fig_marque, use_container_width=True) + # Message d'avertissement pour les déchets non catégorisés + if nb_vide_rep != 0: + st.caption( + "Note : cette analyse exclut " + # + str(french_format(nb_vide_rep)) + + " XXX " + + " déchets dont la marque n'a pas pu être determinée." 
+ ) + else: st.markdown("## 🚨 Veuillez vous connecter pour accéder à l'onglet 🚨") From 4c048eb51230d1176edc43d99b27bd3058fa8c60 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 29 May 2024 16:13:26 +0200 Subject: [PATCH 130/147] [tg] - retrait des VIDES et INDETERMINES dans les metrics de l'onglet 3 --- dashboards/app/pages/data.py | 117 ++++++++++++++++++++--------------- 1 file changed, 67 insertions(+), 50 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index d2c5e8a..465ca31 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -5,6 +5,7 @@ from folium import IFrame import math import locale +import duckdb # Configuration de la page @@ -1130,11 +1131,24 @@ def french_format(x: int) -> str: # Filtration des données pour nb_dechets df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") - # Data pour le plot secteur - secteur_df = df_init[df_init["type_regroupement"].isin(["SECTEUR"])] - secteur_df = secteur_df[ - secteur_df["NIVEAU_CARAC"] == 4 - ] # Filtre sur relevés de niveau 4 + # Data pour le plot secteur : filtrer par type_regroup et niveau 4 + + secteur_df = duckdb.query( + ( + "SELECT * " + "FROM df_init " + "WHERE type_regroupement='SECTEUR' AND NIVEAU_CARAC = 4 AND categorie NOT IN ('VIDE', 'INDÉTERMINÉ');" + ) + ).to_df() + + # Calcul du nombre de secteurs VIDE et INDETERMINE + nb_vide_indetermine = duckdb.query( + ( + "SELECT sum(nb_dechet)" + "FROM df_init " + "WHERE type_regroupement='SECTEUR' AND NIVEAU_CARAC = 4 AND categorie IN ('VIDE', 'INDÉTERMINÉ');" + ) + ).fetchone()[0] top_secteur_df = ( secteur_df.groupby("categorie")["nb_dechet"] @@ -1148,8 +1162,22 @@ def french_format(x: int) -> str: ].astype(int) # Data pour le plot responsabilités - rep_df = df_init[df_init["type_regroupement"].isin(["REP"])] - rep_df = rep_df[rep_df["NIVEAU_CARAC"] == 4] # Filtre sur relevés de niveau 4 + rep_df = 
duckdb.query( + ( + "SELECT * " + "FROM df_init " + "WHERE type_regroupement='REP' AND NIVEAU_CARAC = 4 AND categorie NOT IN ('VIDE', 'INDÉTERMINÉ');" + ) + ).to_df() # Filtre sur le regroupement REP et le niveau 4, exclusion des vides et indeterminés + + # Calcul du nombre de secteurs VIDE et INDETERMINE + nb_vide_indetermine_REP = duckdb.query( + ( + "SELECT sum(nb_dechet)" + "FROM df_init " + "WHERE type_regroupement='REP' AND NIVEAU_CARAC = 4 AND categorie IN ('VIDE', 'INDÉTERMINÉ');" + ) + ).fetchone()[0] top_rep_df = ( rep_df.groupby("categorie")["nb_dechet"].sum().sort_values(ascending=True) @@ -1159,10 +1187,23 @@ def french_format(x: int) -> str: # Data pour le plot marque - marque_df = df_init[df_init["type_regroupement"].isin(["MARQUE"])] - marque_df = marque_df[ - marque_df["NIVEAU_CARAC"] >= 2 - ] # Filtre sur relevés de niveau 2, 3 et 4 + # Data pour le plot responsabilités + marque_df = duckdb.query( + ( + "SELECT * " + "FROM df_init " + "WHERE type_regroupement='MARQUE' AND NIVEAU_CARAC >= 2 AND categorie NOT IN ('VIDE', 'INDÉTERMINÉ');" + ) + ).to_df() # Filtre sur le regroupement REP et le niveau 4, exclusion des vides et indeterminés + + # Calcul du nombre de secteurs VIDE et INDETERMINE + nb_vide_indetermine_marque = duckdb.query( + ( + "SELECT sum(nb_dechet)" + "FROM df_init " + "WHERE type_regroupement='MARQUE' AND NIVEAU_CARAC = 4 AND categorie IN ('VIDE', 'INDÉTERMINÉ');" + ) + ).fetchone()[0] top_marque_df = ( marque_df.groupby("categorie")["nb_dechet"] @@ -1175,34 +1216,23 @@ def french_format(x: int) -> str: int ) - # Chiffres clés + # Chiffres clés secteurs nb_dechet_secteur = secteur_df["nb_dechet"].sum() - nb_secteurs = len(top_secteur_df["Secteur"].unique()) - nb_dechet_marque = marque_df["nb_dechet"].sum() - nb_marques = len(top_marque_df["Marque"].unique()) + nb_secteurs = secteur_df["categorie"].nunique() collectes_sect = secteur_df["ID_RELEVE"].nunique() + + # Chiffres clés filières REP + nb_dechet_rep = rep_df["nb_dechet"].sum() 
collectes_rep = rep_df["ID_RELEVE"].nunique() + nb_rep = rep_df["categorie"].nunique() + + # Chiffres clés marques + nb_dechet_marque = marque_df["nb_dechet"].sum() + nb_marques = marque_df["categorie"].nunique() collectes_marque = marque_df["ID_RELEVE"].nunique() - nb_dechet_rep = rep_df["nb_dechet"].sum() - nb_rep = len(top_rep_df["Responsabilité élargie producteur"].unique()) ### ANALYSE PAR SECTEUR st.write("**Analyse par secteur économique** (relevés de niveau 4 uniquement)") - # Retrait des categoriés "VIDE" et "INDERTERMINE" si présentes et recupération des valeurs - nb_vide_indetermine = 0 - if "VIDE" in top_secteur_df["Secteur"].unique(): - df_vide_indetermine = top_secteur_df[top_secteur_df["Secteur"] == "VIDE"] - nb_vide_indetermine = df_vide_indetermine["Nombre de déchets"].sum() - elif "INDÉTERMINÉ" in top_secteur_df["Secteur"].unique(): - df_vide_indetermine = top_secteur_df[ - top_secteur_df["Secteur"] == "INDÉTERMINÉ" - ] - nb_vide_indetermine += df_vide_indetermine["Nombre de déchets"].sum() - else: - pass - - top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "INDÉTERMINÉ"] - top_secteur_df = top_secteur_df[top_secteur_df["Secteur"] != "VIDE"] # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -1312,7 +1342,7 @@ def french_format(x: int) -> str: st.caption( "Note : cette analyse exclut " + str(french_format(nb_vide_indetermine)) - + " déchets dont le secteur n'a pas pu être determiné." + + " déchets dont le secteur est 'Vide' ou 'Indeterminé'." 
) ### ANALYSE PAR FILIERE REP @@ -1323,18 +1353,6 @@ def french_format(x: int) -> str: l3_col1, l3_col2, l3_col3 = st.columns(3) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # Suppression de la catégorie "VIDE" - nb_vide_rep = 0 - if "VIDE" in top_rep_df["Responsabilité élargie producteur"].unique(): - df_vide_rep = top_rep_df[ - top_rep_df["Responsabilité élargie producteur"] == "VIDE" - ] - nb_vide_rep = df_vide_rep["Nombre de déchets"].sum() - else: - pass - top_rep_df = top_rep_df[ - top_rep_df["Responsabilité élargie producteur"] != "VIDE" - ] # 1ère métrique : nombre de dechets catégorisés repartis par responsabilités cell6 = l3_col1.container(border=True) @@ -1394,10 +1412,10 @@ def french_format(x: int) -> str: st.plotly_chart(figreptree, use_container_width=True) # Message d'avertissement Nombre de déchets dont la REP n'a pas été determine - if nb_vide_rep != 0: + if nb_vide_indetermine_REP != 0: st.caption( "Note : Cette analyse exclut " - + str(french_format(nb_vide_rep)) + + str(french_format(nb_vide_indetermine_REP)) + " déchets dont la filière REP n'a pas pu être determinée." ) @@ -1462,11 +1480,10 @@ def french_format(x: int) -> str: st.plotly_chart(fig_marque, use_container_width=True) # Message d'avertissement pour les déchets non catégorisés - if nb_vide_rep != 0: + if nb_vide_indetermine_marque != None: st.caption( "Note : cette analyse exclut " - # + str(french_format(nb_vide_rep)) - + " XXX " + + str(french_format(nb_vide_indetermine_marque)) + " déchets dont la marque n'a pas pu être determinée." 
) From f7dc00c0b4d6541cada2eb0f1362afd4eb123051 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 29 May 2024 16:24:14 +0200 Subject: [PATCH 131/147] =?UTF-8?q?[tg]=20-=20ajout=20%d=C3=A9chets=20dans?= =?UTF-8?q?=20graph=20par=20secteurs?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 465ca31..0d2f606 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -1160,6 +1160,11 @@ def french_format(x: int) -> str: top_secteur_df["Nombre de déchets"] = top_secteur_df[ "Nombre de déchets" ].astype(int) + # Calcul du pourcentage + top_secteur_df["Pourcentage"] = ( + top_secteur_df["Nombre de déchets"] + / top_secteur_df["Nombre de déchets"].sum() + ) # Data pour le plot responsabilités rep_df = duckdb.query( @@ -1304,6 +1309,7 @@ def french_format(x: int) -> str: y="Secteur", color="Secteur", title="Top 10 des secteurs économiques identifiés dans les déchets comptés", + hover_data=["Pourcentage"], labels={ "Nombre de déchets": "Nombre total de déchets (échelle logarithmique)", }, @@ -1331,7 +1337,7 @@ def french_format(x: int) -> str: # Paramétrage de l'infobulle fig_secteur.update_traces( - hovertemplate="Secteur : %{y}
Quantité : %{x:,.0f} déchets" + hovertemplate="Secteur : %{y}
Quantité : %{x:,.0f} déchets
Part du total déchets : %{customdata[0]:.0%}" ) with st.container(border=True): @@ -1405,7 +1411,7 @@ def french_format(x: int) -> str: textfont=dict(size=16), hovertemplate="%{label}
" + "Quantité de déchets : %{value:,.0f}
" - + "Part du total ramassé : %{percentRoot:.1%}", + + "Part des déchets catégorisés : %{percentRoot:.1%}", ) with st.container(border=True): From fb43f53dc11b5495577de1111e977fab93458d37 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Thu, 6 Jun 2024 10:59:46 +0200 Subject: [PATCH 132/147] =?UTF-8?q?[tg]=20-=20am=C3=A9lioration=20layout?= =?UTF-8?q?=203=20onglets?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 183 ++++++++++++++++------------------- 1 file changed, 83 insertions(+), 100 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 0d2f606..aade6ff 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -32,10 +32,14 @@ if st.session_state["authentication_status"]: if filtre_niveau == "" and filtre_collectivite == "": - st.write("Aucune sélection de territoire n'a été effectuée") + with st.sidebar: + st.warning("⚠️ Aucune sélection de territoire n'a été effectuée") else: - st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") - + with st.sidebar: + st.info( + f" Territoire sélectionné : **{filtre_niveau} {filtre_collectivite}**", + icon="🌍", + ) # Définition d'une fonction pour charger les données du nombre de déchets@st.cache_data def load_df_dict_corr_dechet_materiau(): return pd.read_csv( @@ -201,6 +205,14 @@ def french_format(x: int) -> str: "Autre": "#F3B900", } + # Message d'avertissement en haut de page si nb de collectes < 5 + if nb_collectes_int <= 5: + st.warning( + "⚠️ Faible nombre de ramassages (" + + str(nb_collectes_int) + + ") dans la base de données." 
+ ) + # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -221,58 +233,28 @@ def french_format(x: int) -> str: cell3 = l1_col3.container(border=True) cell3.metric("Nombre de ramassages", french_format(nb_collectes_int)) - # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int <= 5: - st.warning( - "⚠️ Faible nombre de ramassages (" - + str(nb_collectes_int) - + ") dans la base de données." - ) - # Note méthodo pour expliquer les données retenues pour l'analyse - with st.expander( - "Note sur les données utilisées dans les graphiques ci-dessous" - ): - st.caption( - f"Il n’y a pas de correspondance entre le poids et le volume global\ + with st.expander("Note sur les données utilisées dans cet onglet"): + st.markdown( + f""" + - Il n’y a pas de correspondance entre le poids et le volume global\ de déchets indiqués car certaines organisations \ ne renseignent que le volume sans mention de poids \ - (protocole de niveau 1) ou inversement." - ) - st.caption( - f"De plus, \ - les chiffres ci-dessous sont calculés sur **{french_format(nb_collectes_carac)}** ramassages \ + (protocole de niveau 1) ou inversement. + - Les chiffres ci-dessous sont calculés sur **{french_format(nb_collectes_carac)}** ramassages \ ayant fait l’objet d’une estimation des volumes \ par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³.\ - Les relevés de niveau 0 et les relevés comptabilisant 100% de déchets 'AUTRES' ont été exclus." - ) - df_note_methodo = df_volume.groupby(["Exclusions"], as_index=False)[ - "ID_RELEVE" - ].count() - fig_data = px.pie( - df_note_methodo, - values="ID_RELEVE", - names="Exclusions", - title="Nombre de ramassages inclus ou exclus dans les analyses ci-dessous", - color="Exclusions", - color_discrete_sequence=px.colors.sequential.RdBu, + Les relevés de niveau 0 et les relevés comptabilisant 100% de déchets 'AUTRES' ont été exclus. 
+ """ ) - # Réglage du texte affiché, format et taille de police - fig_data.update_traces( - textinfo="value+percent+label", - texttemplate="%{label}
%{value:.0f} relevés
%{percent:.0%}", - textfont_size=14, - hoverinfo=None, - insidetextorientation="horizontal", - rotation=90, + # Afficher le nombre de relevés inclus ou exclus + df_note_methodo = ( + df_volume.groupby(["Exclusions"], as_index=True)["ID_RELEVE"] + .count() + .sort_values(ascending=False) ) - # Cacher la légende - fig_data.update_layout( - showlegend=False, - separators=", ", # Séparateurs décimales et milliers - ) - - st.plotly_chart(fig_data) + df_note_methodo.rename("Nombre de relevés", inplace=True) + st.dataframe(df_note_methodo) # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux @@ -339,7 +321,7 @@ def french_format(x: int) -> str: fig2.update_layout( autosize=True, - # uniformtext_minsize=8, + # uniformtext_minsize=10, uniformtext_mode="hide", xaxis_tickangle=-45, showlegend=False, @@ -434,6 +416,12 @@ def french_format(x: int) -> str: # Afficher le graphique with st.container(border=True): + + # Message d'avertissement si pas de données à afficher + if len(df_typemilieu) == 0: + st.warning("⚠️ Aucune donnée à afficher") + + # Afficher le graphique st.plotly_chart(fig3, use_container_width=True) # Ne pas faire apparaître la catégorie "Multi-lieux" @@ -444,7 +432,7 @@ def french_format(x: int) -> str: df_nb_par_milieu.rename( { "TYPE_MILIEU": "Milieu", - "ID_RELEVE": "", + "ID_RELEVE": "Ramassages", }, axis=1, inplace=True, @@ -463,7 +451,7 @@ def french_format(x: int) -> str: ) # Ligne 3 : Graphe par milieu , lieu et année - st.write("**Filtrer les données par année, type de milieu ou type de lieu**") + st.write("**Détail par année, type de milieu ou de lieu**") # Étape 1: Création des filtres @@ -615,6 +603,12 @@ def french_format(x: int) -> str: # & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) # ] + # Message d'avertissement nb de collectes en dessous de 5 + if len(df_filtered) <= 5: + st.warning( + f"⚠️ Faible nombre de ramassages ({len(df_filtered)}) dans la base de données." 
+ ) + # Ligne 5 : Metriques filtrés l5_col1, l5_col2, l5_col3 = st.columns(3) cell6 = l5_col1.container(border=True) @@ -637,13 +631,6 @@ def french_format(x: int) -> str: nombre_collectes_filtered = len(df_filtered) cell8.metric("Nombre de ramassages", french_format(nombre_collectes_filtered)) - # Message d'avertissement nb de collectes en dessous de 5 - if len(df_filtered) <= 5: - st.warning( - "⚠️ Faible nombre de ramassages disponibles dans la base de données : " - + str(len(df_filtered)) - ) - # Étape 3: Preparation dataframe pour graphe # Copie des données pour transfo df_volume2 = df_filtered.copy() @@ -818,6 +805,14 @@ def french_format(x: int) -> str: nb_collec_top = df_top10["ID_RELEVE"].nunique() + # Message d'avertissement nb de collectes en dessous de 5 + if nb_collectes_int <= 5: + st.warning( + "⚠️ Faible nombre de ramassages (" + + str(nb_collectes_int) + + ") dans la base de données." + ) + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2 = st.columns(2) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) @@ -831,14 +826,6 @@ def french_format(x: int) -> str: cell2 = l1_col2.container(border=True) cell2.metric("Nombre de ramassages", french_format(nb_collec_top)) - # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int <= 5: - st.warning( - "⚠️ Le nombre de ramassages " - + str(nb_collectes_int) - + " est trop faible pour l'analyse." 
- ) - # Ligne 2 : graphique top déchets # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement @@ -875,37 +862,35 @@ def french_format(x: int) -> str: color_discrete_map=colors_map, category_orders={"categorie": df_top10_dechets["categorie"].tolist()}, ) - fig5.update_layout(xaxis_type="log") - # suppression de la légende des couleurs + fig5.update_layout( - showlegend=True, - height=700, - uniformtext_minsize=8, - uniformtext_mode="show", - yaxis_title=None, - # Position de la légende + xaxis_type="log", # Echelle logarithmique + showlegend=True, # Afficher la légende + height=700, # Régler la hauteur du graphique + uniformtext_minsize=10, # Taille minimale du texte sur les barres + uniformtext_mode="show", # Règle d'affichage du texte sur les barres + yaxis_title=None, # Cache le titre de l'axe y legend=dict( yanchor="bottom", y=1.01, xanchor="right", x=0.95, - ), - separators=", ", + ), # Règle la position de la légende à partir du point d'ancrage choisi + separators=", ", # Formatte les nombres en français (séparateur décimale, séparateur milliers) ) - # Amélioration du visuel du graphique fig5.update_traces( - texttemplate="%{text:,.0f}", - textposition="inside", - textfont_color="white", - textfont_size=14, + texttemplate="%{text:,.0f}", # Template du texte sur les barres + textposition="inside", # Position du texte sur les barres + textfont_color="white", # Couleur du texte + textfont_size=14, # Taille du texte ) fig5.update_yaxes(tickfont=dict(size=14)) # Taille des étiquettes en abcisse fig5.update_traces( hovertemplate="%{y} : %{x:,.0f} déchets" - ) # Template de l'infobulle + ) # Template de l'infobulle, fait référence à x et y définis dans px.bar. 
# Suppression de la colonne categorie del df_top10_dechets["Materiau"] @@ -916,8 +901,7 @@ def french_format(x: int) -> str: st.write("") st.caption( f"Note : Les chiffres ci-dessous sont calculés sur {nb_collec_top} ramassages \ - ayant fait l’objet d’une estimation des volumes \ - par matériau." + ayant fait l’objet d’un comptage par type de déchets, soit {nb_total_dechets:.0f} déchets." ) with st.container(border=True): @@ -1239,6 +1223,14 @@ def french_format(x: int) -> str: ### ANALYSE PAR SECTEUR st.write("**Analyse par secteur économique** (relevés de niveau 4 uniquement)") + # Message d'avertissement nb de collectes en dessous de 5 + if collectes_sect <= 5: + st.warning( + "⚠️ Faible nombre de ramassages (" + + str(collectes_sect) + + ") dans la base de données." + ) + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) @@ -1246,9 +1238,7 @@ def french_format(x: int) -> str: cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers - cell1.metric( - "Quantité de déchets catégorisés", french_format(nb_dechet_secteur) - ) + cell1.metric("Nombre de déchets comptés", french_format(nb_dechet_secteur)) # 2ème métrique : poids cell2 = l1_col2.container(border=True) @@ -1264,14 +1254,6 @@ def french_format(x: int) -> str: french_format(collectes_sect), ) - # Message d'avertissement nb de collectes en dessous de 5 - if collectes_sect <= 5: - st.warning( - "⚠️ Faible nombre de ramassages (" - + str(collectes_sect) - + ") dans la base de données." 
- ) - # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page colors_map_secteur = { "AGRICULTURE": "#156644", @@ -1326,6 +1308,7 @@ def french_format(x: int) -> str: ) fig_secteur.update_layout( height=700, + uniformtext_minsize=10, uniformtext_mode="hide", showlegend=False, yaxis_title=None, @@ -1337,14 +1320,14 @@ def french_format(x: int) -> str: # Paramétrage de l'infobulle fig_secteur.update_traces( - hovertemplate="Secteur : %{y}
Quantité : %{x:,.0f} déchets
Part du total déchets : %{customdata[0]:.0%}" + hovertemplate="Secteur : %{y}
Quantité : %{x:,.0f} déchets
Proportion : %{customdata[0]:.0%}" ) with st.container(border=True): st.plotly_chart(fig_secteur, use_container_width=True) # Message d'avertissement Nombre de dechets dont le secteur n'a pas été determine - if nb_vide_indetermine != 0: + if nb_vide_indetermine != 0 and nb_vide_indetermine != None: st.caption( "Note : cette analyse exclut " + str(french_format(nb_vide_indetermine)) @@ -1418,7 +1401,7 @@ def french_format(x: int) -> str: st.plotly_chart(figreptree, use_container_width=True) # Message d'avertissement Nombre de déchets dont la REP n'a pas été determine - if nb_vide_indetermine_REP != 0: + if nb_vide_indetermine_REP != 0 and nb_vide_indetermine_REP != None: st.caption( "Note : Cette analyse exclut " + str(french_format(nb_vide_indetermine_REP)) @@ -1467,7 +1450,7 @@ def french_format(x: int) -> str: fig_marque.update_layout( # xaxis_type="log", # Pas besoin d'échelle log ici height=700, - uniformtext_minsize=8, + uniformtext_minsize=10, uniformtext_mode="hide", yaxis_title=None, separators=", ", @@ -1486,7 +1469,7 @@ def french_format(x: int) -> str: st.plotly_chart(fig_marque, use_container_width=True) # Message d'avertissement pour les déchets non catégorisés - if nb_vide_indetermine_marque != None: + if nb_vide_indetermine_marque != None and nb_vide_indetermine_marque != 0: st.caption( "Note : cette analyse exclut " + str(french_format(nb_vide_indetermine_marque)) From a9ff5f14627835008b6dcb17f1d1a654061b5fcf Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Thu, 6 Jun 2024 15:11:12 +0200 Subject: [PATCH 133/147] [tg] - optimisation du code des filtres --- dashboards/app/pages/data.py | 550 +++++++++++++++-------------------- 1 file changed, 241 insertions(+), 309 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index aade6ff..22c0a04 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -1,5 +1,6 @@ import streamlit as st import pandas 
as pd +import numpy as np import plotly.express as px import folium from folium import IFrame @@ -68,14 +69,15 @@ def load_df_dict_corr_dechet_materiau(): # Exclusion des ramassages de niveau 0 ou avec 100% de AUTRES def carac_exclusions(df): - if df["NIVEAU_CARAC"] == 0: - return "Exclu - niveau 0" - elif df["GLOBAL_VOLUME_AUTRE"] == df["VOLUME_TOTAL"]: - return "Exclu - 100% Autre" - else: - return "Inclus" + conditions = [ + df["NIVEAU_CARAC"] == 0, + df["GLOBAL_VOLUME_AUTRE"] == df["VOLUME_TOTAL"], + ] + choices = ["Exclu - niveau 0", "Exclu - 100% Autre"] + return np.select(conditions, choices, default="Inclus") - df_other["Exclusions"] = df_other.apply(lambda row: carac_exclusions(row), axis=1) + # Appliquer la fonction au dataframe + df_other["Exclusions"] = carac_exclusions(df_other) # Raccourcir les étiquettes de milieux trop longues df_other = df_other.replace( @@ -89,16 +91,16 @@ def carac_exclusions(df): # Fonction pour améliorer l'affichage des nombres (milliers, millions, milliards) def french_format(x: int) -> str: - if x > 1e9: + if x >= 1e9: y = x / 1e9 y = locale.format_string("%.2f", y, grouping=True) return f"{y} milliards" - if x > 1e6: + if x >= 1e6: y = x / 1e6 y = locale.format_string("%.2f", y, grouping=True) return f"{y} millions" else: - y = locale.format_string("%.0f", x, grouping=True) + y = locale.format_string("%d", x, grouping=True) return f"{y}" # 3 Onglets : Matériaux, Top déchets, Filières et marques @@ -455,153 +457,56 @@ def french_format(x: int) -> str: # Étape 1: Création des filtres - # df_other_metrics = df_other_metrics_raw.copy() - # df_other_metrics = df_other_metrics.fillna(0) - with st.expander("Filtrer par année, type milieu ou type de lieu"): # Filtre par Année - # Valeur par défaut sous forme de liste pour concaténation avec données + # Default values for filters valeur_par_defaut_annee = "Toute la période" + valeur_par_defaut_milieu = "Tous les milieux" + valeur_par_defaut_lieu = "Tous les lieux" + # Filter by year 
selected_annee = st.selectbox( "Choisir une année:", options=[valeur_par_defaut_annee] + annee_liste, ) + # Filter data based on selected year + filtered_data = df_other.copy() if selected_annee != valeur_par_defaut_annee: - filtered_data_milieu = df_other[ - df_other["ANNEE"] == selected_annee - ].copy() - # filtered_metrics_milieu = df_other_metrics[ - # df_other_metrics["ANNEE"] == selected_annee - # ].copy() - else: - filtered_data_milieu = df_other.copy() - # filtered_metrics_milieu = df_other_metrics.copy() + filtered_data = filtered_data[filtered_data["ANNEE"] == selected_annee] - ## Filtre par milieu - # Initialiser le champ déroulant avec une valeur par défaut - valeur_par_defaut_milieu = "Tous les milieux" + # Filter by milieu milieux_liste = [valeur_par_defaut_milieu] + sorted( - list(filtered_data_milieu["TYPE_MILIEU"].unique()) + filtered_data["TYPE_MILIEU"].unique() ) - selected_type_milieu = st.selectbox( "Choisir un type de milieu:", options=milieux_liste, ) + # Filter data based on selected milieu if selected_type_milieu != valeur_par_defaut_milieu: - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu + filtered_data = filtered_data[ + filtered_data["TYPE_MILIEU"] == selected_type_milieu ] - # filtered_metrics_milieu = filtered_metrics_milieu[ - # filtered_metrics_milieu["TYPE_MILIEU"] == selected_type_milieu - # ] - else: - filtered_data_lieu = filtered_data_milieu.copy() - # filtered_metrics_milieu = df_other_metrics.copy() - - # Filtre par type de lieu - valeur_par_defaut_lieu = "Tous les lieux" + # Filter by lieu lieux_liste = [valeur_par_defaut_lieu] + sorted( - list(filtered_data_lieu["TYPE_LIEU"].unique()) + filtered_data["TYPE_LIEU"].unique() ) - selected_type_lieu = st.selectbox( "Choisir un type de lieu:", options=lieux_liste, ) - if ( - selected_annee == valeur_par_defaut_annee - and selected_type_milieu == valeur_par_defaut_milieu - and selected_type_lieu == valeur_par_defaut_lieu - 
): - df_filtered = df_other.copy() - # df_filtered_metrics = df_other_metrics_raw.copy() - elif ( - selected_type_milieu == valeur_par_defaut_milieu - and selected_type_lieu == valeur_par_defaut_lieu - ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee].copy() - # df_filtered_metrics = df_other_metrics_raw[ - # df_other_metrics["ANNEE"] == selected_annee - # ].copy() - elif ( - selected_annee == valeur_par_defaut_annee - and selected_type_lieu == valeur_par_defaut_lieu - and selected_type_milieu != valeur_par_defaut_milieu - ): - df_filtered = df_other[ - df_other["TYPE_MILIEU"] == selected_type_milieu - ].copy() - # df_filtered_metrics = df_other_metrics_raw[ - # df_other_metrics["TYPE_MILIEU"] == selected_type_milieu - # ].copy() - - elif ( - selected_annee == valeur_par_defaut_annee - and selected_type_lieu != valeur_par_defaut_lieu - and selected_type_milieu == valeur_par_defaut_milieu - ): - df_filtered = df_other[df_other["TYPE_LIEU"] == selected_type_lieu].copy() - # df_filtered_metrics = df_other_metrics_raw[ - # df_other_metrics["TYPE_LIEU"] == selected_type_lieu - # ].copy() - - elif ( - selected_annee == valeur_par_defaut_annee - and selected_type_lieu != valeur_par_defaut_lieu - and selected_type_milieu != valeur_par_defaut_milieu - ): - df_filtered = df_other[ - (df_other["TYPE_LIEU"] == selected_type_lieu) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - ].copy() - # df_filtered_metrics = df_other_metrics_raw[ - # (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - # & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - # ] - elif ( - selected_annee != valeur_par_defaut_annee - and selected_type_lieu != valeur_par_defaut_lieu - and selected_type_milieu == valeur_par_defaut_milieu - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_LIEU"] == selected_type_lieu) - ].copy() - # df_filtered_metrics = df_other_metrics_raw[ - # (df_other_metrics["ANNEE"] == selected_annee) - # & 
(df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - # ] - elif ( - selected_annee != valeur_par_defaut_annee - and selected_type_lieu == valeur_par_defaut_lieu - and selected_type_milieu != valeur_par_defaut_milieu - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - ].copy() - # df_filtered_metrics = df_other_metrics_raw[ - # (df_other_metrics["ANNEE"] == selected_annee) - # & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - # ] + # Filter data based on selected lieu + if selected_type_lieu != valeur_par_defaut_lieu: + filtered_data = filtered_data[ + filtered_data["TYPE_LIEU"] == selected_type_lieu + ] - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee) - & (df_other["TYPE_MILIEU"] == selected_type_milieu) - & (df_other["TYPE_LIEU"] == selected_type_lieu) - ].copy() - # df_filtered_metrics = df_other_metrics_raw[ - # (df_other_metrics["ANNEE"] == selected_annee) - # & (df_other_metrics["TYPE_MILIEU"] == selected_type_milieu) - # & (df_other_metrics["TYPE_LIEU"] == selected_type_lieu) - # ] + # Final filtered data + df_filtered = filtered_data.copy() # Message d'avertissement nb de collectes en dessous de 5 if len(df_filtered) <= 5: @@ -705,105 +610,77 @@ def french_format(x: int) -> str: with tab2: # Préparation des datas pour l'onglet 2 - df_top = df_nb_dechet.copy() - df_top_data_releves = df_other.copy() - - filtered_df = df_other.copy() # Initialiser le df sans filtres + df_top_dechets = df_nb_dechet.copy() # Filtres with st.expander("Filtrer par année, type milieu ou type de lieu"): - # Définir les options - annee_options = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - options_annee = [valeur_par_defaut_annee] + annee_options - options_milieux = [valeur_par_defaut_milieu] + sorted( - list(df_other["TYPE_MILIEU"].unique()) - ) - options_lieux = [valeur_par_defaut_lieu] + sorted( - list(df_other["TYPE_LIEU"].unique()) + filtered_df = 
df_other.copy() # Initialiser le df sans filtres + + # Define the initial options for the selectboxes + annee_options = [valeur_par_defaut_annee] + sorted( + df_other["ANNEE"].unique().tolist(), reverse=True ) + milieu_options = [valeur_par_defaut_milieu] + lieu_options = [valeur_par_defaut_lieu] annee = st.selectbox( "Choisir une année :", - options=options_annee, - index=options_annee.index(valeur_par_defaut_annee), # Définir l'index + options=annee_options, + index=0, # Définir l'index key="topdechets_annee", # définir key pour éviter conflits ) + # Apply filters based on the selected values + if annee != valeur_par_defaut_annee: + filtered_df = filtered_df[filtered_df["ANNEE"] == annee] + + # Update milieu options based on filtered data + milieu_options += sorted(filtered_df["TYPE_MILIEU"].unique().tolist()) + milieu = st.selectbox( "Choisir un type de milieu :", - options=options_milieux, - index=options_milieux.index( - valeur_par_defaut_milieu - ), # Définir l'index + options=milieu_options, + index=0, # Définir l'index key="topdechets_milieu", # définir key pour éviter conflits ) - # Mise à jour dynamique des filtres + # Apply milieu filter if selected if milieu != valeur_par_defaut_milieu: - options_lieux = [valeur_par_defaut_lieu] + list( - milieu_lieu_dict[milieu] - ) + filtered_df = filtered_df[filtered_df["TYPE_MILIEU"] == milieu] + # Update lieu options based on filtered data + lieu_options += sorted(filtered_df["TYPE_LIEU"].unique().tolist()) + + # Lieu selection lieu = st.selectbox( "Choisir un type de lieu :", - options=options_lieux, - index=options_lieux.index(valeur_par_defaut_lieu), # Définir l'index - key="topdechets_lieu", # définir key pour éviter conflits + options=lieu_options, + index=0, # Default to the first option (valeur_par_defaut_lieu) + key="topdechets_lieu", ) - # Conditions pour filtrer les valeurs et ne pas considérer la valeur par défaut dans le filtre - if annee == valeur_par_defaut_annee: # Aucun filtre annee - if milieu == 
valeur_par_defaut_milieu: # Aucun filtre milieu - if lieu == valeur_par_defaut_lieu: # Aucun filtre lieu - pass # Pas de filtre - else: # Si lieu choisi - filtered_df = filtered_df[(filtered_df["TYPE_LIEU"] == lieu)] - else: # Si milieu choisi - if lieu == valeur_par_defaut_lieu: # Aucun filtre lieu - filtered_df = filtered_df[(filtered_df["TYPE_MILIEU"] == milieu)] - else: # Si milieu ET lieu choisi - filtered_df = filtered_df[ - (filtered_df["TYPE_MILIEU"] == milieu) - & (filtered_df["TYPE_LIEU"] == lieu) - ] - else: # Si annee a été choisie - if milieu == valeur_par_defaut_milieu: # Aucun filtre milieu - if lieu == valeur_par_defaut_lieu: # Aucun filtre lieu - filtered_df = filtered_df[ - (filtered_df["ANNEE"] == annee) - ] # Filtre annee uniquement - else: # Si lieu choisi - filtered_df = filtered_df[ - (filtered_df["ANNEE"] == annee) - & (filtered_df["TYPE_LIEU"] == lieu) - ] - else: # Si milieu choisi - if lieu == valeur_par_defaut_lieu: # Aucun filtre lieu - filtered_df = filtered_df[ - (filtered_df["ANNEE"] == annee) - & (filtered_df["TYPE_MILIEU"] == milieu) - ] - else: # Si milieu ET lieu choisi : 3 filtres - filtered_df = filtered_df[ - (filtered_df["ANNEE"] == annee) - & (filtered_df["TYPE_MILIEU"] == milieu) - & (filtered_df["TYPE_LIEU"] == lieu) - ] + # Apply lieu filter if selected + if lieu != valeur_par_defaut_lieu: + filtered_df = filtered_df[filtered_df["TYPE_LIEU"] == lieu] + + # The filtered_df now contains the data based on the selected filters # Récupérer les index de collectes pour filtrer le dataframe nb_dechets # Filtrer les données sur les ID_RELEVES - df_top10 = pd.merge(df_top, filtered_df, on="ID_RELEVE", how="inner") + df_top_dechets = pd.merge( + df_top_dechets, filtered_df, on="ID_RELEVE", how="inner" + ) # Retrait des lignes avec 100% de volume catégorisé en AUTRE - df_top10 = df_top10[df_top10["Exclusions"] == "Inclus"] + df_top_dechets = df_top_dechets[df_top_dechets["Exclusions"] == "Inclus"] # Calcul du nombre total de déchets 
catégorisés sur le territoier - nb_total_dechets = df_top10[(df_top10["type_regroupement"] == "GROUPE")][ - "nb_dechet" - ].sum() + nb_total_dechets = df_top_dechets[ + (df_top_dechets["type_regroupement"] == "GROUPE") + ]["nb_dechet"].sum() - nb_collec_top = df_top10["ID_RELEVE"].nunique() + nb_collec_top = df_top_dechets["ID_RELEVE"].nunique() # Message d'avertissement nb de collectes en dessous de 5 if nb_collectes_int <= 5: @@ -829,16 +706,17 @@ def french_format(x: int) -> str: # Ligne 2 : graphique top déchets # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement - df_dechets_groupe = df_top10[df_top10["type_regroupement"].isin(["GROUPE"])] + df_top_dechets = df_top_dechets[ + df_top_dechets["type_regroupement"].isin(["GROUPE"]) + ] # Group by 'categorie', sum 'nb_dechet', et top 10 df_top10_dechets = ( - df_dechets_groupe.groupby("categorie") + df_top_dechets.groupby("categorie") .agg({"nb_dechet": "sum"}) .sort_values(by="nb_dechet", ascending=False) .head(10) ) - # recuperation de ces 10 dechets dans une liste pour filtration bubble map - noms_top10_dechets = df_top10_dechets.index.tolist() + # Preparation des datas pour l'onglet 3# ajout de la colonne materiau df_top10_dechets = df_top10_dechets.merge( df_dict_corr_dechet_materiau, on="categorie", how="left" @@ -907,18 +785,21 @@ def french_format(x: int) -> str: with st.container(border=True): st.write("**Lieux de ramassage des déchets dans le top 10**") + # Ajout de la selectbox selected_dechet = st.selectbox( - "Choisir un type de déchet :", noms_top10_dechets, index=0 + "Choisir un type de déchet :", + df_top10_dechets["categorie"].unique().tolist(), + index=0, ) # Filtration sur le dechet top 10 sélectionné - df_top_map = df_top[df_top["categorie"] == selected_dechet] + df_map_data = df_top_dechets[df_top_dechets["categorie"] == selected_dechet] - # Création du DataFrame de travail pour la carte - df_map_data = pd.merge( - df_top_map, df_top_data_releves, on="ID_RELEVE", 
how="inner" - ) + # # Création du DataFrame de travail pour la carte + # df_map_data = pd.merge( + # df_top_map, df_top_data_releves, on="ID_RELEVE", how="inner" + # ) # Création de la carte centrée autour d'une localisation # Initialisation du zoom sur la carte @@ -993,127 +874,178 @@ def french_format(x: int) -> str: # Préparation des données df_dechet_copy = df_nb_dechet.copy() - df_filtre_copy = df_other.copy() + filtered_df = df_other.copy() # Étape 1: Création des filtres with st.expander("Filtrer par année, type milieu ou type de lieu"): - # Filtre par année - selected_annee_onglet_3 = st.selectbox( - "Choisir une année:", - options=[valeur_par_defaut_annee] + annee_liste, - key="année_select", + # Define the initial options for the selectboxes + annee_options = [valeur_par_defaut_annee] + sorted( + df_other["ANNEE"].unique().tolist(), reverse=True ) - if selected_annee_onglet_3 != valeur_par_defaut_annee: - filtered_data_milieu = df_other[ - df_other["ANNEE"] == selected_annee_onglet_3 - ] - else: - filtered_data_milieu = df_other.copy() + milieu_options = [valeur_par_defaut_milieu] + lieu_options = [valeur_par_defaut_lieu] - ## Filtre par type de milieu - # Initialiser la liste des lieux - milieux_liste = [valeur_par_defaut_milieu] + sorted( - list(filtered_data_milieu["TYPE_MILIEU"].unique()) + # Year selection + annee = st.selectbox( + "Choisir une année :", + options=annee_options, + index=0, # Default to the first option (valeur_par_defaut_annee) + key="secteurs_annee", ) - selected_type_milieu_onglet_3 = st.selectbox( - "Choisir un type de milieu:", - options=milieux_liste, - key="type_milieu_select", - ) + # Apply year filter if selected + if annee != valeur_par_defaut_annee: + filtered_df = filtered_df[filtered_df["ANNEE"] == annee] - if selected_type_milieu_onglet_3 != valeur_par_defaut_milieu: - filtered_data_lieu = filtered_data_milieu[ - filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - ] - else: - filtered_data_lieu = 
filtered_data_milieu + # Update milieu options based on filtered data + milieu_options += sorted(filtered_df["TYPE_MILIEU"].unique().tolist()) - ## Filtre par lieu - # Initialiser la liste des lieux - lieux_liste = [valeur_par_defaut_lieu] + sorted( - list(filtered_data_lieu["TYPE_LIEU"].unique()) + # Milieu selection + milieu = st.selectbox( + "Choisir un type de milieu :", + options=milieu_options, + index=0, # Default to the first option (valeur_par_defaut_milieu) + key="secteurs_milieu", ) - selected_type_lieu_onglet_3 = st.selectbox( - "Choisir un type de lieu:", - options=lieux_liste, - key="type_lieu_select", + # Apply milieu filter if selected + if milieu != valeur_par_defaut_milieu: + filtered_df = filtered_df[filtered_df["TYPE_MILIEU"] == milieu] + + # Update lieu options based on filtered data + lieu_options += sorted(filtered_df["TYPE_LIEU"].unique().tolist()) + + # Lieu selection + lieu = st.selectbox( + "Choisir un type de lieu :", + options=lieu_options, + index=0, # Default to the first option (valeur_par_defaut_lieu) + key="secteurs_lieu", ) - if ( - selected_annee_onglet_3 == valeur_par_defaut_annee - and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu - and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu - ): - df_filtered = df_other.copy() - elif ( - selected_type_milieu_onglet_3 == valeur_par_defaut_milieu - and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu - ): - df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() - elif ( - selected_annee_onglet_3 == valeur_par_defaut_annee - and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu - and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu - ): - df_filtered = df_other[ - df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - ].copy() - elif ( - selected_annee_onglet_3 == valeur_par_defaut_annee - and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu - and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu - ): - df_filtered 
= df_other[ - df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 - ].copy() - elif ( - selected_annee_onglet_3 == valeur_par_defaut_annee - and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu - and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu - ): - df_filtered = df_other[ - (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - elif ( - selected_annee_onglet_3 != valeur_par_defaut_annee - and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu - and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - ].copy() - elif ( - selected_annee_onglet_3 != valeur_par_defaut_annee - and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu - and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu - ): - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - - elif selected_type_lieu_onglet_3 == valeur_par_defaut_lieu: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - ].copy() - else: - df_filtered = df_other[ - (df_other["ANNEE"] == selected_annee_onglet_3) - & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - ].copy() + # Apply lieu filter if selected + if lieu != valeur_par_defaut_lieu: + filtered_df = filtered_df[filtered_df["TYPE_LIEU"] == lieu] + + # The filtered_df now contains the data based on the selected filters + + # # Filtre par année + # selected_annee_onglet_3 = st.selectbox( + # "Choisir une année:", + # options=[valeur_par_defaut_annee] + annee_liste, + # key="année_select", + # ) + # if selected_annee_onglet_3 != valeur_par_defaut_annee: + # filtered_data_milieu 
= df_other[ + # df_other["ANNEE"] == selected_annee_onglet_3 + # ] + # else: + # filtered_data_milieu = df_other.copy() + + # ## Filtre par type de milieu + # # Initialiser la liste des lieux + # milieux_liste = [valeur_par_defaut_milieu] + sorted( + # list(filtered_data_milieu["TYPE_MILIEU"].unique()) + # ) + + # selected_type_milieu_onglet_3 = st.selectbox( + # "Choisir un type de milieu:", + # options=milieux_liste, + # key="type_milieu_select", + # ) + + # if selected_type_milieu_onglet_3 != valeur_par_defaut_milieu: + # filtered_data_lieu = filtered_data_milieu[ + # filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + # ] + # else: + # filtered_data_lieu = filtered_data_milieu + + # ## Filtre par lieu + # # Initialiser la liste des lieux + # lieux_liste = [valeur_par_defaut_lieu] + sorted( + # list(filtered_data_lieu["TYPE_LIEU"].unique()) + # ) + + # selected_type_lieu_onglet_3 = st.selectbox( + # "Choisir un type de lieu:", + # options=lieux_liste, + # key="type_lieu_select", + # ) + + # if ( + # selected_annee_onglet_3 == valeur_par_defaut_annee + # and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu + # and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu + # ): + # df_filtered = df_other.copy() + # elif ( + # selected_type_milieu_onglet_3 == valeur_par_defaut_milieu + # and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu + # ): + # df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() + # elif ( + # selected_annee_onglet_3 == valeur_par_defaut_annee + # and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu + # and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu + # ): + # df_filtered = df_other[ + # df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 + # ].copy() + # elif ( + # selected_annee_onglet_3 == valeur_par_defaut_annee + # and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu + # and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu + # ): + # df_filtered 
= df_other[ + # df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 + # ].copy() + # elif ( + # selected_annee_onglet_3 == valeur_par_defaut_annee + # and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu + # and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu + # ): + # df_filtered = df_other[ + # (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + # & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + # ].copy() + # elif ( + # selected_annee_onglet_3 != valeur_par_defaut_annee + # and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu + # and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu + # ): + # df_filtered = df_other[ + # (df_other["ANNEE"] == selected_annee_onglet_3) + # & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + # ].copy() + # elif ( + # selected_annee_onglet_3 != valeur_par_defaut_annee + # and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu + # and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu + # ): + # df_filtered = df_other[ + # (df_other["ANNEE"] == selected_annee_onglet_3) + # & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + # ].copy() + + # elif selected_type_lieu_onglet_3 == valeur_par_defaut_lieu: + # df_filtered = df_other[ + # (df_other["ANNEE"] == selected_annee_onglet_3) + # & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + # ].copy() + # else: + # df_filtered = df_other[ + # (df_other["ANNEE"] == selected_annee_onglet_3) + # & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) + # & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) + # ].copy() # # Filtration des données pour nb_dechets - df_init = pd.merge(df_dechet_copy, df_filtered, on="ID_RELEVE", how="inner") + df_init = pd.merge(df_dechet_copy, filtered_df, on="ID_RELEVE", how="inner") # Data pour le plot secteur : filtrer par type_regroup et niveau 4 From 858bb7a39f8ce1556136f5e9e614fd561898b75f Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes 
<122997528+tgazagnes@users.noreply.github.com> Date: Thu, 6 Jun 2024 17:45:29 +0200 Subject: [PATCH 134/147] =?UTF-8?q?[tg]=20Am=C3=A9liorations=20suite=20poi?= =?UTF-8?q?nt=20Merterre?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 606 +++++++++++++++++++---------------- 1 file changed, 327 insertions(+), 279 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 22c0a04..aac0b04 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -95,13 +95,16 @@ def french_format(x: int) -> str: y = x / 1e9 y = locale.format_string("%.2f", y, grouping=True) return f"{y} milliards" - if x >= 1e6: + elif x >= 1e6: y = x / 1e6 y = locale.format_string("%.2f", y, grouping=True) return f"{y} millions" - else: + elif x >= 10: y = locale.format_string("%d", x, grouping=True) return f"{y}" + else: + y = locale.format_string("%.2f", x, grouping=True) + return f"{y}" # 3 Onglets : Matériaux, Top déchets, Filières et marques tab1, tab2, tab3 = st.tabs( @@ -208,12 +211,8 @@ def french_format(x: int) -> str: } # Message d'avertissement en haut de page si nb de collectes < 5 - if nb_collectes_int <= 5: - st.warning( - "⚠️ Faible nombre de ramassages (" - + str(nb_collectes_int) - + ") dans la base de données." 
- ) + if nb_collectes_int < 5: + st.warning("⚠️ Moins de 5 ramassages dans la base de données") # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2, l1_col3 = st.columns(3) @@ -233,7 +232,7 @@ def french_format(x: int) -> str: # 3ème métrique : nombre de relevés cell3 = l1_col3.container(border=True) - cell3.metric("Nombre de ramassages", french_format(nb_collectes_int)) + cell3.metric("Nombre de ramassages", nb_collectes_int) # Note méthodo pour expliquer les données retenues pour l'analyse with st.expander("Note sur les données utilisées dans cet onglet"): @@ -243,7 +242,7 @@ def french_format(x: int) -> str: de déchets indiqués car certaines organisations \ ne renseignent que le volume sans mention de poids \ (protocole de niveau 1) ou inversement. - - Les chiffres ci-dessous sont calculés sur **{french_format(nb_collectes_carac)}** ramassages \ + - Les chiffres ci-dessous sont calculés sur **{nb_collectes_carac}** ramassages \ ayant fait l’objet d’une estimation des volumes \ par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³.\ Les relevés de niveau 0 et les relevés comptabilisant 100% de déchets 'AUTRES' ont été exclus. @@ -282,6 +281,8 @@ def french_format(x: int) -> str: textinfo="percent", texttemplate="%{percent:.0%}", textfont_size=14, + direction="clockwise", + rotation=-90, ) # Paramétrage de l'étiquette flottante @@ -313,17 +314,19 @@ def french_format(x: int) -> str: # Amélioration du graphique fig2.update_traces( - texttemplate="%{text:.2s}", + texttemplate="%{text:.2f}", textposition="inside", textfont_size=14, ) # Paramétrage de l'étiquette flottante - fig2.update_traces(hovertemplate="%{label}: %{value:.1f} m³") + fig2.update_traces( + hovertemplate="Matériau : %{label}
Volume : %{value:.2f} m³" + ) fig2.update_layout( autosize=True, - # uniformtext_minsize=10, + uniformtext_minsize=10, uniformtext_mode="hide", xaxis_tickangle=-45, showlegend=False, @@ -420,32 +423,39 @@ def french_format(x: int) -> str: with st.container(border=True): # Message d'avertissement si pas de données à afficher - if len(df_typemilieu) == 0: - st.warning("⚠️ Aucune donnée à afficher") + if len(df_typemilieu) != 0: - # Afficher le graphique - st.plotly_chart(fig3, use_container_width=True) + # Afficher le graphique + st.plotly_chart(fig3, use_container_width=True) - # Ne pas faire apparaître la catégorie "Multi-lieux" - lignes_multi = df_nb_par_milieu.loc[df_nb_par_milieu.index == "Multi-lieux"] - df_nb_par_milieu.drop(lignes_multi.index, axis=0, inplace=True) + # Ne pas faire apparaître la catégorie "Multi-lieux" + lignes_multi = df_nb_par_milieu.loc[ + df_nb_par_milieu.index == "Multi-lieux" + ] + df_nb_par_milieu.drop(lignes_multi.index, axis=0, inplace=True) + + # Renommage des colonnes pour l'affichage + df_nb_par_milieu.rename( + { + "TYPE_MILIEU": "Milieu", + "ID_RELEVE": "Ramassages", + }, + axis=1, + inplace=True, + ) - # Renommage des colonnes pour l'affichage - df_nb_par_milieu.rename( - { - "TYPE_MILIEU": "Milieu", - "ID_RELEVE": "Ramassages", - }, - axis=1, - inplace=True, - ) + # Convertir en int pour éviter les virgules à l'affichage + df_nb_par_milieu = df_nb_par_milieu.astype("int") - # Convertir en int pour éviter les virgules à l'affichage - df_nb_par_milieu = df_nb_par_milieu.astype("int") + # Affichage du tableau + st.write("**Nombre de ramassages par milieu**") + st.table(df_nb_par_milieu.T) + + else: + st.warning( + "⚠️ Aucune donnée à afficher par type de milieu (nombre de ramassages trop faible)" + ) - # Affichage du tableau - st.write("**Nombre de ramassages par milieu**") - st.table(df_nb_par_milieu.T) st.caption( f"Les ramassages catégorisés en 'Multi-lieux' " + f"ont été retirés de l'analyse. 
" @@ -509,10 +519,8 @@ def french_format(x: int) -> str: df_filtered = filtered_data.copy() # Message d'avertissement nb de collectes en dessous de 5 - if len(df_filtered) <= 5: - st.warning( - f"⚠️ Faible nombre de ramassages ({len(df_filtered)}) dans la base de données." - ) + if len(df_filtered) < 5: + st.warning("⚠️ Moins de 5 ramassages dans la base de données") # Ligne 5 : Metriques filtrés l5_col1, l5_col2, l5_col3 = st.columns(3) @@ -534,7 +542,7 @@ def french_format(x: int) -> str: ) nombre_collectes_filtered = len(df_filtered) - cell8.metric("Nombre de ramassages", french_format(nombre_collectes_filtered)) + cell8.metric("Nombre de ramassages", nombre_collectes_filtered) # Étape 3: Preparation dataframe pour graphe # Copie des données pour transfo @@ -594,9 +602,9 @@ def french_format(x: int) -> str: ) fig4.update_traces( textinfo="label+value+percent root", - texttemplate="%{label}
%{value:.0f} m³
%{percentRoot}", + texttemplate="%{label}
%{value:.2f} m³
%{percentRoot}", textfont_size=16, - hovertemplate="%{label} : %{value:.1f} m³ " + hovertemplate="%{label} : %{value:.2f} m³ " + "
%{percentRoot:.1%} du volume total", ) @@ -683,12 +691,8 @@ def french_format(x: int) -> str: nb_collec_top = df_top_dechets["ID_RELEVE"].nunique() # Message d'avertissement nb de collectes en dessous de 5 - if nb_collectes_int <= 5: - st.warning( - "⚠️ Faible nombre de ramassages (" - + str(nb_collectes_int) - + ") dans la base de données." - ) + if nb_collectes_int < 5: + st.warning("⚠️ Moins de 5 ramassages dans la base de données") # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page l1_col1, l1_col2 = st.columns(2) @@ -701,7 +705,7 @@ def french_format(x: int) -> str: # 3ème métrique : nombre de relevés cell2 = l1_col2.container(border=True) - cell2.metric("Nombre de ramassages", french_format(nb_collec_top)) + cell2.metric("Nombre de ramassages", nb_collec_top) # Ligne 2 : graphique top déchets @@ -825,15 +829,7 @@ def french_format(x: int) -> str: tiles="OpenStreetMap", ) - # Facteur de normalisation pour ajuster la taille des bulles - normalisation_facteur = 1000 - for index, row in df_map_data.iterrows(): - # Application de la normalisation - # radius = row["nb_dechet"] / normalisation_facteur - - # Application d'une limite minimale pour le rayon si nécessaire - # radius = max(radius, 5) # Calcul du rayon du marqueur en log base 2 pour réduire les écarts if row["nb_dechet"] > 1: @@ -863,6 +859,34 @@ def french_format(x: int) -> str: fill_color="#3186cc", ).add_to(map_data) + # Add a legend + legend_html = """ +
+
Légende
+
+ + + Quantité: 100 + + Quantité: 150 + +
+
+ """ + + map_data.get_root().html.add_child(folium.Element(legend_html)) + # Affichage de la carte Folium dans Streamlit st_folium = st.components.v1.html st_folium( @@ -1155,258 +1179,282 @@ def french_format(x: int) -> str: ### ANALYSE PAR SECTEUR st.write("**Analyse par secteur économique** (relevés de niveau 4 uniquement)") - # Message d'avertissement nb de collectes en dessous de 5 - if collectes_sect <= 5: - st.warning( - "⚠️ Faible nombre de ramassages (" - + str(collectes_sect) - + ") dans la base de données." + # Message d'avertissement si le nombre de collectes est en dessous de 5 + if 0 < collectes_sect < 5: + st.warning("⚠️ Moins de 5 ramassages dans la base de données") + + if len(secteur_df) != 0: + # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + l1_col1, l1_col2, l1_col3 = st.columns(3) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés + cell1 = l1_col1.container(border=True) + + # Trick pour séparer les milliers + cell1.metric("Nombre de déchets comptés", french_format(nb_dechet_secteur)) + + # 2ème métrique : poids + cell2 = l1_col2.container(border=True) + cell2.metric( + "Nombre de secteurs concernés", + french_format(nb_secteurs), ) - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page - l1_col1, l1_col2, l1_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - # 1ère métrique : volume total de déchets collectés - cell1 = l1_col1.container(border=True) - - # Trick pour séparer les milliers - cell1.metric("Nombre de déchets comptés", french_format(nb_dechet_secteur)) - - # 2ème métrique : poids - cell2 = l1_col2.container(border=True) - cell2.metric( - "Nombre de secteurs concernés", - french_format(nb_secteurs), - ) - - # 3ème métrique : nombre de collectes - cell3 = 
l1_col3.container(border=True) - cell3.metric( - "Nombre de ramassages", - french_format(collectes_sect), - ) - - # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page - colors_map_secteur = { - "AGRICULTURE": "#156644", - "ALIMENTATION": "#F7D156", - "AMEUBLEMENT, DÉCORATION ET ÉQUIPEMENT DE LA MAISON": "#F79D65", - "AQUACULTURE": "#0067C2", - "BÂTIMENT, TRAVAUX ET MATÉRIAUX DE CONSTRUCTION": "#FF9900", - "CHASSE ET ARMEMENT": "#23A76F", - "COSMÉTIQUES, HYGIÈNE ET SOINS PERSONNELS": "#BF726B", - "DÉTERGENTS ET PRODUITS D'ENTRETIENS": "#506266", - "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", - "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", - "INDÉTERMINÉ": "#967EA1", - "INFORMATIQUE ET HIGHTECH": "#E351F7", - "JOUETS ET LOISIR": "#A64D79", - "MATÉRIEL ÉLECTRIQUE ET ÉLECTROMÉNAGER": "#AE05C3", - "MÉTALLURGIE": "#EC4773", - "PÊCHE": "#003463", - "PETROCHIMIE": "#0D0D0D", - "PHARMACEUTIQUE/PARAMÉDICAL": "#61BF5E", - "PLASTURGIE": "#05A2AD", - "TABAC": "#E9003F", - "TEXTILE ET HABILLEMENT": "#FA9EE5", - "TRAITEMENT DES EAUX": "#4AA6F7", - "TRANSPORT / AUTOMOBILE": "#6C2775", - "VAISSELLE À USAGE UNIQUE": "#732D3A", - "AUTRES SECTEURS": "#D9C190", - } + # 3ème métrique : nombre de collectes + cell3 = l1_col3.container(border=True) + cell3.metric( + "Nombre de ramassages", + collectes_sect, + ) - fig_secteur = px.bar( - top_secteur_df.tail(10).sort_values( - by="Nombre de déchets", ascending=False - ), - x="Nombre de déchets", - y="Secteur", - color="Secteur", - title="Top 10 des secteurs économiques identifiés dans les déchets comptés", - hover_data=["Pourcentage"], - labels={ - "Nombre de déchets": "Nombre total de déchets (échelle logarithmique)", - }, - orientation="h", - color_discrete_map=colors_map_secteur, - text_auto=True, - ) - # add log scale to x axis - fig_secteur.update_layout(xaxis_type="log") - fig_secteur.update_traces( - texttemplate="%{value:,.0f}", - textposition="inside", - textfont_size=14, - ) - fig_secteur.update_layout( - 
height=700, - uniformtext_minsize=10, - uniformtext_mode="hide", - showlegend=False, - yaxis_title=None, - separators=", ", - ) - fig_secteur.update_yaxes( - tickfont=dict(size=14) - ) # Taille des étiquettes en ordonnée + # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page + colors_map_secteur = { + "AGRICULTURE": "#156644", + "ALIMENTATION": "#F7D156", + "AMEUBLEMENT, DÉCORATION ET ÉQUIPEMENT DE LA MAISON": "#F79D65", + "AQUACULTURE": "#0067C2", + "BÂTIMENT, TRAVAUX ET MATÉRIAUX DE CONSTRUCTION": "#FF9900", + "CHASSE ET ARMEMENT": "#23A76F", + "COSMÉTIQUES, HYGIÈNE ET SOINS PERSONNELS": "#BF726B", + "DÉTERGENTS ET PRODUITS D'ENTRETIENS": "#506266", + "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", + "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", + "INDÉTERMINÉ": "#967EA1", + "INFORMATIQUE ET HIGHTECH": "#E351F7", + "JOUETS ET LOISIR": "#A64D79", + "MATÉRIEL ÉLECTRIQUE ET ÉLECTROMÉNAGER": "#AE05C3", + "MÉTALLURGIE": "#EC4773", + "PÊCHE": "#003463", + "PETROCHIMIE": "#0D0D0D", + "PHARMACEUTIQUE/PARAMÉDICAL": "#61BF5E", + "PLASTURGIE": "#05A2AD", + "TABAC": "#E9003F", + "TEXTILE ET HABILLEMENT": "#FA9EE5", + "TRAITEMENT DES EAUX": "#4AA6F7", + "TRANSPORT / AUTOMOBILE": "#6C2775", + "VAISSELLE À USAGE UNIQUE": "#732D3A", + "AUTRES SECTEURS": "#D9C190", + } + + fig_secteur = px.bar( + top_secteur_df.tail(10).sort_values( + by="Nombre de déchets", ascending=False + ), + x="Nombre de déchets", + y="Secteur", + color="Secteur", + title="Top 10 des secteurs économiques identifiés dans les déchets comptés", + hover_data=["Pourcentage"], + labels={ + "Nombre de déchets": "Nombre total de déchets (échelle logarithmique)", + }, + orientation="h", + color_discrete_map=colors_map_secteur, + text_auto=True, + ) + # add log scale to x axis + fig_secteur.update_layout(xaxis_type="log") + fig_secteur.update_traces( + texttemplate="%{value:,.0f}", + textposition="inside", + textfont_size=14, + ) + fig_secteur.update_layout( + height=700, + 
uniformtext_minsize=10, + uniformtext_mode="hide", + showlegend=False, + yaxis_title=None, + separators=", ", + ) + fig_secteur.update_yaxes( + tickfont=dict(size=14) + ) # Taille des étiquettes en ordonnée - # Paramétrage de l'infobulle - fig_secteur.update_traces( - hovertemplate="Secteur : %{y}
Quantité : %{x:,.0f} déchets
Proportion : %{customdata[0]:.0%}" - ) + # Paramétrage de l'infobulle + fig_secteur.update_traces( + hovertemplate="Secteur : %{y}
Quantité : %{x:,.0f} déchets
Proportion : %{customdata[0]:.0%}" + ) - with st.container(border=True): - st.plotly_chart(fig_secteur, use_container_width=True) - - # Message d'avertissement Nombre de dechets dont le secteur n'a pas été determine - if nb_vide_indetermine != 0 and nb_vide_indetermine != None: - st.caption( - "Note : cette analyse exclut " - + str(french_format(nb_vide_indetermine)) - + " déchets dont le secteur est 'Vide' ou 'Indeterminé'." - ) + with st.container(border=True): + st.plotly_chart(fig_secteur, use_container_width=True) + + # Message d'avertissement Nombre de dechets dont le secteur n'a pas été determine + if nb_vide_indetermine != 0 and nb_vide_indetermine != None: + st.caption( + "Note : cette analyse exclut " + + str(nb_vide_indetermine) + + " déchets dont le secteur est indeterminé (fragments ou secteur non identifié)" + ) + else: + st.warning( + "⚠️ Aucune donnée à afficher par secteur (nombre de ramassages trop faible)" + ) ### ANALYSE PAR FILIERE REP st.write( "**Analyse par filière de Responsabilité Élargie du Producteur** (relevés de niveau 4 uniquement)" ) + # Message d'avertissement si le nombre de collectes est en dessous de 5 + if 0 < collectes_rep < 5: + st.warning("⚠️ Moins de 5 ramassages dans la base de données") + + if len(rep_df) != 0: + l3_col1, l3_col2, l3_col3 = st.columns(3) + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + + # 1ère métrique : nombre de dechets catégorisés repartis par responsabilités + cell6 = l3_col1.container(border=True) + cell6.metric( + "Nombre de déchets comptés", + french_format(nb_dechet_rep), + ) - l3_col1, l3_col2, l3_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - - # 1ère métrique : nombre de dechets catégorisés repartis par responsabilités - cell6 = l3_col1.container(border=True) - cell6.metric( - "Quantité de déchets catégorisés", 
- french_format(nb_dechet_rep), - ) + # 2ème métrique : nombre de responsabilités + cell7 = l3_col2.container(border=True) + cell7.metric( + "Nombre de filières REP identifiées", + french_format(nb_rep), + ) - # 2ème métrique : nombre de responsabilités - cell7 = l3_col2.container(border=True) - cell7.metric( - "Nombre de filières REP identifiées", - french_format(nb_rep), - ) + cell8 = l3_col3.container(border=True) # Nb de collectes + cell8.metric( + "Nombre de ramassages", + collectes_rep, + ) - cell8 = l3_col3.container(border=True) # Nb de collectes - cell8.metric( - "Nombre de ramassages", - french_format(collectes_rep), - ) + with st.expander("Qu'est-ce que la Responsabilité Élargie du Producteur ?"): + st.write( + "La Responsabilité Élargie du Producteur (REP) est une obligation qui impose aux entreprises de payer une contribution financière" + + " pour la prise en charge de la gestion des déchets issus des produits qu’ils mettent sur le marché selon le principe pollueur-payeur." + + " Pour ce faire, elles doivent contribuer financièrement à la collecte, du tri et au recyclage de ces produits, " + + "généralement à travers les éco-organismes privés, agréés par l’Etat, comme CITEO pour les emballages. " + + "L’État a depuis 1993 progressivement mis en place 25 filières REP, qui regroupent de grandes familles de produits " + + "(emballages ménagers, tabac, textile, ameublement, …)." + ) - with st.expander("Qu'est-ce que la Responsabilité Élargie du Producteur ?"): - st.write( - "La Responsabilité Élargie du Producteur (REP) est une obligation qui impose aux entreprises de payer une contribution financière" - + " pour la prise en charge de la gestion des déchets issus des produits qu’ils mettent sur le marché selon le principe pollueur-payeur." 
- + " Pour ce faire, elles doivent contribuer financièrement à la collecte, du tri et au recyclage de ces produits, " - + "généralement à travers les éco-organismes privés, agréés par l’Etat, comme CITEO pour les emballages. " - + "L’État a depuis 1993 progressivement mis en place 25 filières REP, qui regroupent de grandes familles de produits " - + "(emballages ménagers, tabac, textile, ameublement, …)." + # Treemap REP + figreptree = px.treemap( + top_rep_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), + path=["Responsabilité élargie producteur"], + values="Nombre de déchets", + title="Top 10 des filières REP relatives aux déchets les plus ramassés", + color="Responsabilité élargie producteur", + color_discrete_sequence=px.colors.qualitative.Set2, + ) + figreptree.update_layout( + margin=dict(t=50, l=25, r=25, b=25), + autosize=True, + height=500, + separators=", ", + ) + figreptree.update_traces( + textinfo="label+value+percent root", + texttemplate="%{label}
%{value:,.0f} déchets
%{percentRoot} du total", + textfont=dict(size=16), + hovertemplate="%{label}
" + + "Quantité de déchets : %{value:,.0f}
" + + "Part des déchets catégorisés : %{percentRoot:.1%}", ) - # Treemap REP - figreptree = px.treemap( - top_rep_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), - path=["Responsabilité élargie producteur"], - values="Nombre de déchets", - title="Top 10 des filières REP relatives aux déchets les plus ramassés", - color="Responsabilité élargie producteur", - color_discrete_sequence=px.colors.qualitative.Set2, - ) - figreptree.update_layout( - margin=dict(t=50, l=25, r=25, b=25), - autosize=True, - height=500, - separators=", ", - ) - figreptree.update_traces( - textinfo="label+value+percent root", - texttemplate="%{label}
%{value:,.0f} déchets
%{percentRoot} du total", - textfont=dict(size=16), - hovertemplate="%{label}
" - + "Quantité de déchets : %{value:,.0f}
" - + "Part des déchets catégorisés : %{percentRoot:.1%}", - ) - - with st.container(border=True): - st.plotly_chart(figreptree, use_container_width=True) - - # Message d'avertissement Nombre de déchets dont la REP n'a pas été determine - if nb_vide_indetermine_REP != 0 and nb_vide_indetermine_REP != None: - st.caption( - "Note : Cette analyse exclut " - + str(french_format(nb_vide_indetermine_REP)) - + " déchets dont la filière REP n'a pas pu être determinée." - ) + with st.container(border=True): + st.plotly_chart(figreptree, use_container_width=True) + + # Message d'avertissement Nombre de déchets dont la REP n'a pas été determine + if nb_vide_indetermine_REP != 0 and nb_vide_indetermine_REP != None: + st.caption( + "Note : Cette analyse exclut " + + str(french_format(nb_vide_indetermine_REP)) + + " déchets dont la filière REP n'a pas pu être determinée." + ) + else: + st.warning( + "⚠️ Aucune donnée à afficher par filière REP (nombre de ramassages trop faible)" + ) ### ANALYSES PAR MARQUE st.write("**Analyse par marque** (relevés de niveaux 2 à 4)") - l2_col1, l2_col2, l2_col3 = st.columns(3) - cell4 = l2_col1.container(border=True) + # Message d'avertissement si le nombre de collectes est en dessous de 5 + if 0 < collectes_marque < 5: + st.warning("⚠️ Moins de 5 ramassages dans la base de données") - # 1er métrique : nombre de dechets categorises par marques + if len(top_marque_df) != 0: - cell4.metric( - "Quantité de déchets catégorisés", - french_format(nb_dechet_marque), - ) + l2_col1, l2_col2, l2_col3 = st.columns(3) + cell4 = l2_col1.container(border=True) - # 2ème métrique : nombre de marques identifiées lors des collectes - cell5 = l2_col2.container(border=True) - cell5.metric( - "Nombre de marques concernées", - french_format(nb_marques), - ) + # 1er métrique : nombre de dechets categorises par marques - cell12 = l2_col3.container(border=True) # Nb de collectes - cell12.metric( - "Nombre de ramassages", - french_format(collectes_marque), - ) + 
cell4.metric( + "Nombre de déchets comptés", + french_format(nb_dechet_marque), + ) - # Configuration du graphique à barres - fig_marque = px.bar( - top_marque_df.tail(10).sort_values(by="Nombre de déchets", ascending=True), - x="Nombre de déchets", - y="Marque", - title="Top 10 des marques identifiées dans les déchets comptés", - color_discrete_sequence=["#1951A0"], - orientation="h", - text_auto=True, - ) + # 2ème métrique : nombre de marques identifiées lors des collectes + cell5 = l2_col2.container(border=True) + cell5.metric( + "Nombre de marques concernées", + french_format(nb_marques), + ) - # add log scale to x axis - fig_marque.update_layout( - # xaxis_type="log", # Pas besoin d'échelle log ici - height=700, - uniformtext_minsize=10, - uniformtext_mode="hide", - yaxis_title=None, - separators=", ", - ) - # Paramétrage de la taille de police et de l'infobulle - fig_marque.update_traces( - textfont_size=14, - texttemplate="%{value:,.0f}", - hovertemplate="Marque : %{y}
Quantité : %{x:,.0f} déchets", - ) - fig_marque.update_yaxes( - tickfont=dict(size=14) - ) # Taille des étiquettes en ordonnée + cell12 = l2_col3.container(border=True) # Nb de collectes + cell12.metric( + "Nombre de ramassages", + collectes_marque, + ) - with st.container(border=True): - st.plotly_chart(fig_marque, use_container_width=True) - - # Message d'avertissement pour les déchets non catégorisés - if nb_vide_indetermine_marque != None and nb_vide_indetermine_marque != 0: - st.caption( - "Note : cette analyse exclut " - + str(french_format(nb_vide_indetermine_marque)) - + " déchets dont la marque n'a pas pu être determinée." - ) + # Configuration du graphique à barres + fig_marque = px.bar( + top_marque_df.tail(10).sort_values( + by="Nombre de déchets", ascending=True + ), + x="Nombre de déchets", + y="Marque", + title="Top 10 des marques identifiées dans les déchets comptés", + color_discrete_sequence=["#1951A0"], + orientation="h", + text_auto=True, + ) + + # add log scale to x axis + fig_marque.update_layout( + # xaxis_type="log", # Pas besoin d'échelle log ici + height=700, + uniformtext_minsize=10, + uniformtext_mode="hide", + yaxis_title=None, + separators=", ", + ) + # Paramétrage de la taille de police et de l'infobulle + fig_marque.update_traces( + textfont_size=14, + texttemplate="%{value:,.0f}", + hovertemplate="Marque : %{y}
Quantité : %{x:,.0f} déchets", + ) + fig_marque.update_yaxes( + tickfont=dict(size=14) + ) # Taille des étiquettes en ordonnée + + with st.container(border=True): + st.plotly_chart(fig_marque, use_container_width=True) + + # Message d'avertissement pour les déchets non catégorisés + if ( + nb_vide_indetermine_marque != None + and nb_vide_indetermine_marque != 0 + ): + st.caption( + "Note : cette analyse exclut " + + str(french_format(nb_vide_indetermine_marque)) + + " déchets dont la marque n'a pas pu être determinée." + ) + else: + st.warning( + "⚠️ Aucune donnée à afficher par marque (nombre de ramassages trop faible)" + ) else: From 0c42b3b00f88e1609eb76f5dd9a7a1963294104a Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Tue, 18 Jun 2024 12:21:41 +0200 Subject: [PATCH 135/147] =?UTF-8?q?[tg]=20Ajout=20d=C3=A9tail=20donn=C3=A9?= =?UTF-8?q?es=20utilis=C3=A9es=20+=20rayon=20carto=20top=20d=C3=A9chets?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/pages/data.py | 92 +++++++++++++++++++++++------------- 1 file changed, 60 insertions(+), 32 deletions(-) diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index aac0b04..7d264a6 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -142,6 +142,9 @@ def french_format(x: int) -> str: # variables à décroiser de la base de données correspondant aux Volume global de chaque matériau cols_volume = [k for k in df_other.columns if "GLOBAL_VOLUME_" in k] + volume_total_categorise_avant_exclusions = ( + df_other[cols_volume].sum().sum() / 1000 + ) # Copie des données pour transfo df_volume = df_other.copy() @@ -153,8 +156,12 @@ def french_format(x: int) -> str: # Volume en litres dans la base, converti en m3 volume_total_m3 = df_volume["VOLUME_TOTAL"].sum() / 1000 poids_total = df_volume["POIDS_TOTAL"].sum() - volume_total_categorise_m3 = 
df_volume_cleaned[cols_volume].sum().sum() / 1000 - pct_volume_categorise = volume_total_categorise_m3 / volume_total_m3 + volume_total_categorise_apres_exclusions_m3 = ( + df_volume_cleaned[cols_volume].sum().sum() / 1000 + ) + pct_volume_categorise = ( + volume_total_categorise_apres_exclusions_m3 / volume_total_m3 + ) # Nb total de collecte incluant les 100% autres et les relevés de niveau 0 nb_collectes_int = df_volume["ID_RELEVE"].nunique() # Nb de collectes excluant les 100% autres et les relevés de niveau 0 @@ -235,6 +242,17 @@ def french_format(x: int) -> str: cell3.metric("Nombre de ramassages", nb_collectes_int) # Note méthodo pour expliquer les données retenues pour l'analyse + # Périmètre des données + volume_total_avant_exclusions_m3 = df_other["VOLUME_TOTAL"].sum() / 1000 + volume_exclu = ( + volume_total_categorise_avant_exclusions + - volume_total_categorise_apres_exclusions_m3 + ) + volume_global_percentage_nul = ( + df_other[df_other["GLOBAL_POURCENTAGE_TOTAL"] == 0]["VOLUME_TOTAL"].sum() + / 1000 + ) + with st.expander("Note sur les données utilisées dans cet onglet"): st.markdown( f""" @@ -242,10 +260,14 @@ def french_format(x: int) -> str: de déchets indiqués car certaines organisations \ ne renseignent que le volume sans mention de poids \ (protocole de niveau 1) ou inversement. - - Les chiffres ci-dessous sont calculés sur **{nb_collectes_carac}** ramassages \ - ayant fait l’objet d’une estimation des volumes \ - par matériau, soit un volume total de {french_format(volume_total_categorise_m3)} m³.\ - Les relevés de niveau 0 et les relevés comptabilisant 100% de déchets 'AUTRES' ont été exclus. + - Certaines collectes n'ont pas fait l'objet d'un comptage par matériau. 
Par conséquent, le volume utilisé pour l'analyse est de **{french_format(volume_total_categorise_apres_exclusions_m3)} m³**, calculé sur **{nb_collectes_carac} ramassages** \ + + - Détails : + - Volume total enregistré dans la base de données : {french_format(volume_total_avant_exclusions_m3)} m³ + - Volume correspondant aux collectes qui n'ont pas fait l'objet d'un comptage par matériau : {french_format(volume_global_percentage_nul)} m³ + - Relevés de niveau 0 et relevés comptabilisant 100% de déchets 'AUTRES', exclus de l'analyse : {french_format(volume_exclu)} m³ + - Volume final utilisé pour l'analyse par matériaux : {french_format(volume_total_categorise_apres_exclusions_m3)} m³ + """ ) # Afficher le nombre de relevés inclus ou exclus @@ -255,7 +277,6 @@ def french_format(x: int) -> str: .sort_values(ascending=False) ) df_note_methodo.rename("Nombre de relevés", inplace=True) - st.dataframe(df_note_methodo) # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux @@ -344,7 +365,16 @@ def french_format(x: int) -> str: ### GRAPHIQUE PAR MILIEU DE COLLECTE - # Calcul du nombre de collectes par milieu + # Calcul du dataframe groupé par milieu et matériau pour le graphique + df_typemilieu = df_volume_cleaned.groupby( + ["TYPE_MILIEU", "Matériau"], as_index=False + ).agg({"Volume_m3": "sum", "ID_RELEVE": "count"}) + + df_typemilieu = df_typemilieu.sort_values( + ["TYPE_MILIEU", "Volume_m3"], ascending=True + ) + + # Calcul du nombre de collectes par milieu pour exclure les milieux à moins de 3 collectes df_nb_par_milieu = ( df_other.groupby("TYPE_MILIEU", as_index=True) .agg( @@ -354,22 +384,12 @@ def french_format(x: int) -> str: ) .sort_values("TYPE_MILIEU", ascending=True) ) + # Exclure les milieux avec moins de 3 collectes milieux_a_exclure = df_nb_par_milieu[ df_nb_par_milieu["ID_RELEVE"] <= 3 ].index.to_list() df_nb_par_milieu = df_nb_par_milieu.drop(milieux_a_exclure, axis=0) - - # Calcul du dataframe groupé par milieu et matériau pour le graphique - 
df_typemilieu = df_volume_cleaned.groupby( - ["TYPE_MILIEU", "Matériau"], as_index=False - ).agg({"Volume_m3": "sum", "ID_RELEVE": "count"}) - - df_typemilieu = df_typemilieu.sort_values( - ["TYPE_MILIEU", "Volume_m3"], ascending=True - ) - - # Retirer milieux avec moins de 3 collectes df_typemilieu = df_typemilieu[ ~df_typemilieu["TYPE_MILIEU"].isin(milieux_a_exclure) ] @@ -378,6 +398,14 @@ def french_format(x: int) -> str: lignes_multi = df_typemilieu.loc[df_typemilieu["TYPE_MILIEU"] == "Multi-lieux"] df_typemilieu.drop(lignes_multi.index, axis=0, inplace=True) + # Fusionner les dataframe pour obtenir le nb de collectes et le volume par milieu + df_nb_par_milieu = pd.merge( + df_nb_par_milieu, + df_typemilieu.groupby("TYPE_MILIEU")["Volume_m3"].sum(), + on="TYPE_MILIEU", + how="left", + ) + # Graphique à barre empilées du pourcentage de volume collecté par an et type de matériau fig3 = px.histogram( df_typemilieu, @@ -450,18 +478,17 @@ def french_format(x: int) -> str: # Affichage du tableau st.write("**Nombre de ramassages par milieu**") st.table(df_nb_par_milieu.T) + st.caption( + f"Les ramassages catégorisés en 'Multi-lieux' " + + f"ont été retirés de l'analyse. " + + f"Les milieux représentant moins de 3 ramassages ne sont pas affichés." + ) else: st.warning( "⚠️ Aucune donnée à afficher par type de milieu (nombre de ramassages trop faible)" ) - st.caption( - f"Les ramassages catégorisés en 'Multi-lieux' " - + f"ont été retirés de l'analyse. " - + f"Les milieux représentant moins de 3 ramassages ne sont pas affichés." 
- ) - # Ligne 3 : Graphe par milieu , lieu et année st.write("**Détail par année, type de milieu ou de lieu**") @@ -831,9 +858,10 @@ def french_format(x: int) -> str: for index, row in df_map_data.iterrows(): - # Calcul du rayon du marqueur en log base 2 pour réduire les écarts + # Calcul du rayon du marqueur en log base 2 pour réduire les écarts if row["nb_dechet"] > 1: - radius = math.log2(row["nb_dechet"]) + radius = math.log2(row["nb_dechet"] / 10) * 2 + else: radius = 0.001 @@ -847,11 +875,11 @@ def french_format(x: int) -> str: radius=radius, # Utilisation du rayon ajusté popup=folium.Popup( html=f""" - Commune : {row['LIEU_VILLE']}
- Zone : {row['NOM_ZONE']}
- Quantité : {formatted_nb_dechet} {selected_dechet}
- Date : {row['DATE']} - """, + Quantité : {formatted_nb_dechet}
+ Date : {row['DATE']}
+ Commune : {row['LIEU_VILLE']}
+ Zone : {row['NOM_ZONE']}
+ """, max_width=150, ), color="#3186cc", From 9ddfbedb5ad75490e16a12aa9cf4e0b651943ff2 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Wed, 19 Jun 2024 12:26:42 +0200 Subject: [PATCH 136/147] [tg] remove line from poetry.lock to solve precommit fail --- .gitignore | 2 +- poetry.lock | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index a21d144..ae45a6f 100644 --- a/.gitignore +++ b/.gitignore @@ -102,7 +102,7 @@ ipython_config.py # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock +poetry.lock # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. diff --git a/poetry.lock b/poetry.lock index 254c136..e79000d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -576,8 +576,6 @@ files = [ {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, ] - -[package.dependencies] smmap = ">=3.0.1,<6" [[package]] From ba83666a7aafe4409bcd77771a394ea28ff0e422 Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:31:44 +0200 Subject: [PATCH 137/147] [tg] update poetry.lock to solve precommit conflicts --- dashboards/app/requirements.txt | 1 - poetry.lock | 516 +++++++++----------------------- pyproject.toml | 4 +- 3 files changed, 147 insertions(+), 374 deletions(-) diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 54ac0f9..b1aa314 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -7,7 +7,6 @@ streamlit==1.32.2 openpyxl==3.1.2 streamlit-folium==0.19.1 
plotly==5.19.0 -streamlit-dynamic-filters==0.1.6 streamlit-authenticator==0.3.2 st-pages==0.4.5 babel==2.11.0 diff --git a/poetry.lock b/poetry.lock index e79000d..7d3ba3d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -44,26 +44,6 @@ tests = ["attrs[tests-no-zope]", "zope-interface"] tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] -[[package]] -name = "attrs" -version = "23.2.0" -description = "Classes Without Boilerplate" -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, -] - -[package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] - [[package]] name = "blinker" version = "1.7.0" @@ -79,7 +59,6 @@ files = [ name = "branca" version = "0.7.1" description = "Generate complex HTML+JS pages with Python" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -251,7 +230,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "click-plugins" version = "1.1.1" description = "An extension module for click to enable registering CLI commands via setuptools entry-points." 
-category = "main" optional = false python-versions = "*" files = [ @@ -269,7 +247,6 @@ dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] name = "cligj" version = "0.7.2" description = "Click params for commmand line interfaces to GeoJSON" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4" files = [ @@ -294,72 +271,6 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -[[package]] -name = "dash" -version = "2.16.1" -description = "A Python framework for building reactive web-apps. Developed by Plotly." -optional = false -python-versions = ">=3.8" -files = [ - {file = "dash-2.16.1-py3-none-any.whl", hash = "sha256:8a9d2a618e415113c0b2a4d25d5dc4df5cb921f733b33dde75559db2316b1df1"}, - {file = "dash-2.16.1.tar.gz", hash = "sha256:b2871d6b8d4c9dfd0a64f89f22d001c93292910b41d92d9ff2bb424a28283976"}, -] - -[package.dependencies] -dash-core-components = "2.0.0" -dash-html-components = "2.0.0" -dash-table = "5.0.0" -Flask = ">=1.0.4,<3.1" -importlib-metadata = "*" -nest-asyncio = "*" -plotly = ">=5.0.0" -requests = "*" -retrying = "*" -setuptools = "*" -typing-extensions = ">=4.1.1" -Werkzeug = "<3.1" - -[package.extras] -celery = ["celery[redis] (>=5.1.2)", "redis (>=3.5.3)"] -ci = ["black (==22.3.0)", "dash-dangerously-set-inner-html", "dash-flow-example (==0.0.5)", "flake8 (==7.0.0)", "flaky (==3.7.0)", "flask-talisman (==1.0.0)", "jupyterlab (<4.0.0)", "mimesis (<=11.1.0)", "mock (==4.0.3)", "numpy (<=1.26.3)", "openpyxl", "orjson (==3.9.12)", "pandas (>=1.4.0)", "pyarrow", "pylint (==3.0.3)", "pytest-mock", "pytest-rerunfailures", "pytest-sugar (==0.9.6)", "pyzmq (==25.1.2)", "xlrd (>=2.0.1)"] -compress = ["flask-compress"] -dev = ["PyYAML (>=5.4.1)", "coloredlogs (>=15.0.1)", "fire (>=0.4.0)"] -diskcache = ["diskcache (>=5.2.1)", "multiprocess (>=0.70.12)", "psutil (>=5.8.0)"] -testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", 
"dash-testing-stub (>=0.0.2)", "lxml (>=4.6.2)", "multiprocess (>=0.70.12)", "percy (>=2.0.2)", "psutil (>=5.8.0)", "pytest (>=6.0.2)", "requests[security] (>=2.21.0)", "selenium (>=3.141.0,<=4.2.0)", "waitress (>=1.4.4)"] - -[[package]] -name = "dash-core-components" -version = "2.0.0" -description = "Core component suite for Dash" -optional = false -python-versions = "*" -files = [ - {file = "dash_core_components-2.0.0-py3-none-any.whl", hash = "sha256:52b8e8cce13b18d0802ee3acbc5e888cb1248a04968f962d63d070400af2e346"}, - {file = "dash_core_components-2.0.0.tar.gz", hash = "sha256:c6733874af975e552f95a1398a16c2ee7df14ce43fa60bb3718a3c6e0b63ffee"}, -] - -[[package]] -name = "dash-html-components" -version = "2.0.0" -description = "Vanilla HTML components for Dash" -optional = false -python-versions = "*" -files = [ - {file = "dash_html_components-2.0.0-py3-none-any.whl", hash = "sha256:b42cc903713c9706af03b3f2548bda4be7307a7cf89b7d6eae3da872717d1b63"}, - {file = "dash_html_components-2.0.0.tar.gz", hash = "sha256:8703a601080f02619a6390998e0b3da4a5daabe97a1fd7a9cebc09d015f26e50"}, -] - -[[package]] -name = "dash-table" -version = "5.0.0" -description = "Dash table" -optional = false -python-versions = "*" -files = [ - {file = "dash_table-5.0.0-py3-none-any.whl", hash = "sha256:19036fa352bb1c11baf38068ec62d172f0515f73ca3276c79dee49b95ddc16c9"}, - {file = "dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308"}, -] - [[package]] name = "distlib" version = "0.3.8" @@ -373,58 +284,58 @@ files = [ [[package]] name = "duckdb" -version = "0.10.1" +version = "0.10.0" description = "DuckDB in-process database" optional = false python-versions = ">=3.7.0" files = [ - {file = "duckdb-0.10.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0ac172788e3d8e410e009e3699016a4d7f17b4c7cde20f98856fca1fea79d247"}, - {file = "duckdb-0.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:f754c20d3b963574da58b0d22029681b79c63f2e32060f10b687f41b7bba54d7"}, - {file = "duckdb-0.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c68b1ef88b8cce185381ec69f437d20059c30623375bab41ac07a1104acdb57"}, - {file = "duckdb-0.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f566f615278844ea240c9a3497c0ef201331628f78e0f9f4d64f72f82210e750"}, - {file = "duckdb-0.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67d2996c3372a0f7d8f41f1c49e00ecdb26f83cdd9132b76730224ad68b1f1e3"}, - {file = "duckdb-0.10.1-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c3b3a18a58eebabb426beafc2f7da01d59805d660fc909e5e143b6db04d881a"}, - {file = "duckdb-0.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:343795d13ec3d8cd06c250225a05fd3c348c3ed49cccdde01addd46cb50f3559"}, - {file = "duckdb-0.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:33f99c2e9e4060464673912312b4ec91060d66638756592c9484c62824ff4e85"}, - {file = "duckdb-0.10.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fdbe4173729043b2fd949be83135b035820bb2faf64648500563b16f3f6f02ee"}, - {file = "duckdb-0.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f90738310a76bd1618acbc7345175582d36b6907cb0ed07841a3d800dea189d6"}, - {file = "duckdb-0.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d14d00560832592cbac2817847b649bd1d573f125d064518afb6eec5b02e15a"}, - {file = "duckdb-0.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11c0bf253c96079c6139e8a0880300d80f4dc9f21a8c5c239d2ebc060b227d46"}, - {file = "duckdb-0.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcc60833bb1a1fb2c33b052cf793fef48f681c565d982acff6ac7a86369794da"}, - {file = "duckdb-0.10.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:88cdc0c2501dd7a65b1df2a76d7624b93d9b6d27febd2ee80b7e5643a0b40bcb"}, - {file = 
"duckdb-0.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:698a8d1d48b150d344d8aa6dbc30a22ea30fb14ff2b15c90004fc9fcb0b3a3e9"}, - {file = "duckdb-0.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:b450aa2b3e0eb1fc0f7ad276bd1e4a5a03b1a4def6c45366af17557de2cafbdf"}, - {file = "duckdb-0.10.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:40dd55ea9c31abc69e5a8299f16c877e0b1950fd9a311c117efb4dd3c0dc8458"}, - {file = "duckdb-0.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7c1b3538bb9c2b49f48b26f092444525b22186efa4e77ba070603ed4a348a66"}, - {file = "duckdb-0.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bce024b69bae426b0739c470803f7b44261bdc0c0700ea7c41dff5f2d70ca4f3"}, - {file = "duckdb-0.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52af2a078340b2e1b57958477ebc1be07786d3ad5796777e87d4f453e0477b4c"}, - {file = "duckdb-0.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3c52b08c773e52484542300339ebf295e3c9b12d5d7d49b2567e252c16205a7"}, - {file = "duckdb-0.10.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:097aa9b6d5c9f5d3ed8c35b16020a67731d04befc35f6b89ccb5db9d5f1489c4"}, - {file = "duckdb-0.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b5a14a80ad09d65c270d16761b04ea6b074811cdfde6b5e4db1a8b0184125d1b"}, - {file = "duckdb-0.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fb98dbbdbf8048b07223dc6e7401333bb4e83681dde4cded2d239051ea102b5"}, - {file = "duckdb-0.10.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28857b0d595c229827cc3631ae9b74ff52d11614435aa715e09d8629d2e1b609"}, - {file = "duckdb-0.10.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d85645136fc25026978b5db81869e8a120cfb60e1645a29a0f6dd155be9e59e"}, - {file = "duckdb-0.10.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e2e10582db74b99051e718279c1be204c98a63a5b6aa4e09226b7249e414146"}, - {file = 
"duckdb-0.10.1-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6a88358d86a8ce689fdd4136514aebedf958e910361156a0bb0e53dc3c55f7d"}, - {file = "duckdb-0.10.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b025afa30fcdcede094386e7c519e6964d26de5ad95f4e04a2a0a713676d4465"}, - {file = "duckdb-0.10.1-cp37-cp37m-win_amd64.whl", hash = "sha256:910be5005de7427c5231a7200027e0adb951e048c612b895340effcd3e660d5a"}, - {file = "duckdb-0.10.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:13d81752763f14203a53981f32bd09731900eb6fda4048fbc532eae5e7bf30e5"}, - {file = "duckdb-0.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:21858225b8a5c5dead128f62e4e88facdcbfdce098e18cbcd86a6cd8f48fb2b3"}, - {file = "duckdb-0.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8bf46d55685906729998eca70ee751934e0425d86863148e658277526c54282e"}, - {file = "duckdb-0.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f786b4402b9c31461ea0520d919e2166df4f9e6e21fd3c7bb0035fa985b5dfe"}, - {file = "duckdb-0.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32e52c6e939a4bada220803e6bde6fc0ce870da5662a33cabdd3be14824183a6"}, - {file = "duckdb-0.10.1-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c563b565ea68cfebe9c4078646503b3d38930218f9c3c278277d58952873771"}, - {file = "duckdb-0.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af8382280f24273a535e08b80e9383ad739c66e22855ce68716dfbaeaf8910b9"}, - {file = "duckdb-0.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:2e6e01e2499e07873b09316bf4d6808f712c57034fa24c255565c4f92386e8e3"}, - {file = "duckdb-0.10.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7791a0aa2cea972a612d31d4a289c81c5d00181328ed4f7642907f68f8b1fb9f"}, - {file = "duckdb-0.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1ace20383fb0ba06229e060a6bb0bcfd48a4582a02e43f05991720504508eb59"}, - {file = 
"duckdb-0.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5aad3e085c33253c689205b5ea3c5d9d54117c1249276c90d495cb85d9adce76"}, - {file = "duckdb-0.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa08173f68e678793dfe6aab6490ac753204ca7935beb8dbde778dbe593552d8"}, - {file = "duckdb-0.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:525efad4e6caff80d0f6a51d466470839146e3880da36d4544fee7ff842e7e20"}, - {file = "duckdb-0.10.1-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48d84577216010ee407913bad9dc47af4cbc65e479c91e130f7bd909a32caefe"}, - {file = "duckdb-0.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6e65f00294c3b8576ae651e91e732ea1cefc4aada89c307fb02f49231fd11e1f"}, - {file = "duckdb-0.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:30aa9dbbfc1f9607249fc148af9e6d6fd253fdc2f4c9924d4957d6a535558b4f"}, - {file = "duckdb-0.10.1.tar.gz", hash = "sha256:0d5b6daa9bb54a635e371798994caa08f26d2f145ebcbc989e16b0a0104e84fb"}, + {file = "duckdb-0.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bd0ffb3fddef0f72a150e4d76e10942a84a1a0447d10907df1621b90d6668060"}, + {file = "duckdb-0.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f3d709d5c7c1a12b5e10d0b05fa916c670cd2b50178e3696faa0cc16048a1745"}, + {file = "duckdb-0.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9114aa22ec5d591a20ce5184be90f49d8e5b5348ceaab21e102c54560d07a5f8"}, + {file = "duckdb-0.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77a37877efadf39caf7cadde0f430fedf762751b9c54750c821e2f1316705a21"}, + {file = "duckdb-0.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87cbc9e1d9c3fc9f14307bea757f99f15f46843c0ab13a6061354410824ed41f"}, + {file = "duckdb-0.10.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f0bfec79fed387201550517d325dff4fad2705020bc139d936cab08b9e845662"}, + {file = 
"duckdb-0.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c5622134d2d9796b15e09de810e450859d4beb46d9b861357ec9ae40a61b775c"}, + {file = "duckdb-0.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:089ee8e831ccaef1b73fc89c43b661567175eed0115454880bafed5e35cda702"}, + {file = "duckdb-0.10.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a05af63747f1d7021995f0811c333dee7316cec3b06c0d3e4741b9bdb678dd21"}, + {file = "duckdb-0.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:072d6eba5d8a59e0069a8b5b4252fed8a21f9fe3f85a9129d186a39b3d0aea03"}, + {file = "duckdb-0.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a77b85668f59b919042832e4659538337f1c7f197123076c5311f1c9cf077df7"}, + {file = "duckdb-0.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96a666f1d2da65d03199a977aec246920920a5ea1da76b70ae02bd4fb1ffc48c"}, + {file = "duckdb-0.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ec76a4262b783628d26612d184834852d9c92fb203e91af789100c17e3d7173"}, + {file = "duckdb-0.10.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:009dd9d2cdbd3b061a9efbdfc79f2d1a8377bcf49f1e5f430138621f8c083a6c"}, + {file = "duckdb-0.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:878f06766088090dad4a2e5ee0081555242b2e8dcb29415ecc97e388cf0cf8d8"}, + {file = "duckdb-0.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:713ff0a1fb63a6d60f454acf67f31656549fb5d63f21ac68314e4f522daa1a89"}, + {file = "duckdb-0.10.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9c0ee450dfedfb52dd4957244e31820feef17228da31af6d052979450a80fd19"}, + {file = "duckdb-0.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ff79b2ea9994398b545c0d10601cd73565fbd09f8951b3d8003c7c5c0cebc7cb"}, + {file = "duckdb-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6bdf1aa71b924ef651062e6b8ff9981ad85bec89598294af8a072062c5717340"}, + {file = 
"duckdb-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0265bbc8216be3ced7b377ba8847128a3fc0ef99798a3c4557c1b88e3a01c23"}, + {file = "duckdb-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d418a315a07707a693bd985274c0f8c4dd77015d9ef5d8d3da4cc1942fd82e0"}, + {file = "duckdb-0.10.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2828475a292e68c71855190b818aded6bce7328f79e38c04a0c75f8f1c0ceef0"}, + {file = "duckdb-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c3aaeaae2eba97035c65f31ffdb18202c951337bf2b3d53d77ce1da8ae2ecf51"}, + {file = "duckdb-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:c51790aaaea97d8e4a58a114c371ed8d2c4e1ca7cbf29e3bdab6d8ccfc5afc1e"}, + {file = "duckdb-0.10.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8af1ae7cc77a12206b6c47ade191882cc8f49f750bb3e72bb86ac1d4fa89926a"}, + {file = "duckdb-0.10.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa4f7e8e8dc0e376aeb280b83f2584d0e25ec38985c27d19f3107b2edc4f4a97"}, + {file = "duckdb-0.10.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28ae942a79fad913defa912b56483cd7827a4e7721f4ce4bc9025b746ecb3c89"}, + {file = "duckdb-0.10.0-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:01b57802898091455ca2a32c1335aac1e398da77c99e8a96a1e5de09f6a0add9"}, + {file = "duckdb-0.10.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:52e1ad4a55fa153d320c367046b9500578192e01c6d04308ba8b540441736f2c"}, + {file = "duckdb-0.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:904c47d04095af745e989c853f0bfc0776913dfc40dfbd2da7afdbbb5f67fed0"}, + {file = "duckdb-0.10.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:184ae7ea5874f3b8fa51ab0f1519bdd088a0b78c32080ee272b1d137e2c8fd9c"}, + {file = "duckdb-0.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:bd33982ecc9bac727a032d6cedced9f19033cbad56647147408891eb51a6cb37"}, + {file = "duckdb-0.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f59bf0949899105dd5f8864cb48139bfb78454a8c017b8258ba2b5e90acf7afc"}, + {file = "duckdb-0.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:395f3b18948001e35dceb48a4423d574e38656606d033eef375408b539e7b076"}, + {file = "duckdb-0.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b8eb2b803be7ee1df70435c33b03a4598cdaf676cd67ad782b288dcff65d781"}, + {file = "duckdb-0.10.0-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:31b2ddd331801064326c8e3587a4db8a31d02aef11332c168f45b3bd92effb41"}, + {file = "duckdb-0.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c8b89e76a041424b8c2026c5dc1f74b53fbbc6c6f650d563259885ab2e7d093d"}, + {file = "duckdb-0.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:79084a82f16c0a54f6bfb7ded5600400c2daa90eb0d83337d81a56924eaee5d4"}, + {file = "duckdb-0.10.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:79799b3a270dcd9070f677ba510f1e66b112df3068425691bac97c5e278929c7"}, + {file = "duckdb-0.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8fc394bfe3434920cdbcfbdd0ac3ba40902faa1dbda088db0ba44003a45318a"}, + {file = "duckdb-0.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c116605551b4abf5786243a59bcef02bd69cc51837d0c57cafaa68cdc428aa0c"}, + {file = "duckdb-0.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3191170c3b0a43b0c12644800326f5afdea00d5a4621d59dbbd0c1059139e140"}, + {file = "duckdb-0.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fee69a50eb93c72dc77e7ab1fabe0c38d21a52c5da44a86aa217081e38f9f1bd"}, + {file = "duckdb-0.10.0-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5f449e87dacb16b0d145dbe65fa6fdb5a55b2b6911a46d74876e445dd395bac"}, + {file = 
"duckdb-0.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4487d0df221b17ea4177ad08131bc606b35f25cfadf890987833055b9d10cdf6"}, + {file = "duckdb-0.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:c099ae2ff8fe939fda62da81704f91e2f92ac45e48dc0e37c679c9d243d01e65"}, + {file = "duckdb-0.10.0.tar.gz", hash = "sha256:c02bcc128002aa79e3c9d89b9de25e062d1096a8793bc0d7932317b7977f6845"}, ] [[package]] @@ -461,7 +372,6 @@ typing = ["typing-extensions (>=4.8)"] name = "fiona" version = "1.9.6" description = "Fiona reads and writes spatial data files" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -505,33 +415,10 @@ calc = ["shapely"] s3 = ["boto3 (>=1.3.1)"] test = ["fiona[s3]", "pytest (>=7)", "pytest-cov", "pytz"] -[[package]] -name = "flask" -version = "3.0.2" -description = "A simple framework for building complex web applications." -optional = false -python-versions = ">=3.8" -files = [ - {file = "flask-3.0.2-py3-none-any.whl", hash = "sha256:3232e0e9c850d781933cf0207523d1ece087eb8d87b23777ae38456e2fbe7c6e"}, - {file = "flask-3.0.2.tar.gz", hash = "sha256:822c03f4b799204250a7ee84b1eddc40665395333973dfb9deebfe425fefcb7d"}, -] - -[package.dependencies] -blinker = ">=1.6.2" -click = ">=8.1.3" -itsdangerous = ">=2.1.2" -Jinja2 = ">=3.1.2" -Werkzeug = ">=3.0.0" - -[package.extras] -async = ["asgiref (>=3.2)"] -dotenv = ["python-dotenv"] - [[package]] name = "folium" version = "0.16.0" description = "Make beautiful maps with Leaflet.js & Python" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -553,7 +440,6 @@ testing = ["pytest"] name = "geopandas" version = "0.14.3" description = "Geographic pandas extensions" -category = "main" optional = false python-versions = ">=3.9" files = [ @@ -567,6 +453,8 @@ packaging = "*" pandas = ">=1.4.0" pyproj = ">=3.3.0" shapely = ">=1.8.0" + +[[package]] name = "gitdb" version = "4.0.11" description = "Git Object Database" @@ -576,6 +464,8 @@ files = [ {file = 
"gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, ] + +[package.dependencies] smmap = ">=3.0.1,<6" [[package]] @@ -621,25 +511,6 @@ files = [ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, ] -[[package]] -name = "importlib-metadata" -version = "7.1.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"}, - {file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"}, -] - -[package.dependencies] -zipp = ">=0.5" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] - [[package]] name = "iniconfig" version = "2.0.0" @@ -651,17 +522,6 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "itsdangerous" -version = "2.1.2" -description = "Safely pass data to untrusted environments and back." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"}, - {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"}, -] - [[package]] name = "jinja2" version = "3.1.3" @@ -818,17 +678,6 @@ files = [ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] -[[package]] -name = "nest-asyncio" -version = "1.6.0" -description = "Patch asyncio to allow nested event loops" -optional = false -python-versions = ">=3.5" -files = [ - {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, - {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, -] - [[package]] name = "nodeenv" version = "1.8.0" @@ -901,76 +750,69 @@ files = [ [[package]] name = "pandas" -version = "2.2.1" +version = "2.0.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "pandas-2.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8df8612be9cd1c7797c93e1c5df861b2ddda0b48b08f2c3eaa0702cf88fb5f88"}, - {file = "pandas-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0f573ab277252ed9aaf38240f3b54cfc90fff8e5cab70411ee1d03f5d51f3944"}, - {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f02a3a6c83df4026e55b63c1f06476c9aa3ed6af3d89b4f04ea656ccdaaaa359"}, - {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c38ce92cb22a4bea4e3929429aa1067a454dcc9c335799af93ba9be21b6beb51"}, - {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:c2ce852e1cf2509a69e98358e8458775f89599566ac3775e70419b98615f4b06"}, - {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53680dc9b2519cbf609c62db3ed7c0b499077c7fefda564e330286e619ff0dd9"}, - {file = "pandas-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:94e714a1cca63e4f5939cdce5f29ba8d415d85166be3441165edd427dc9f6bc0"}, - {file = "pandas-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f821213d48f4ab353d20ebc24e4faf94ba40d76680642fb7ce2ea31a3ad94f9b"}, - {file = "pandas-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c70e00c2d894cb230e5c15e4b1e1e6b2b478e09cf27cc593a11ef955b9ecc81a"}, - {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e97fbb5387c69209f134893abc788a6486dbf2f9e511070ca05eed4b930b1b02"}, - {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101d0eb9c5361aa0146f500773395a03839a5e6ecde4d4b6ced88b7e5a1a6403"}, - {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7d2ed41c319c9fb4fd454fe25372028dfa417aacb9790f68171b2e3f06eae8cd"}, - {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af5d3c00557d657c8773ef9ee702c61dd13b9d7426794c9dfeb1dc4a0bf0ebc7"}, - {file = "pandas-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:06cf591dbaefb6da9de8472535b185cba556d0ce2e6ed28e21d919704fef1a9e"}, - {file = "pandas-2.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:88ecb5c01bb9ca927ebc4098136038519aa5d66b44671861ffab754cae75102c"}, - {file = "pandas-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:04f6ec3baec203c13e3f8b139fb0f9f86cd8c0b94603ae3ae8ce9a422e9f5bee"}, - {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a935a90a76c44fe170d01e90a3594beef9e9a6220021acfb26053d01426f7dc2"}, - {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c391f594aae2fd9f679d419e9a4d5ba4bce5bb13f6a989195656e7dc4b95c8f0"}, - {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9d1265545f579edf3f8f0cb6f89f234f5e44ba725a34d86535b1a1d38decbccc"}, - {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11940e9e3056576ac3244baef2fedade891977bcc1cb7e5cc8f8cc7d603edc89"}, - {file = "pandas-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acf681325ee1c7f950d058b05a820441075b0dd9a2adf5c4835b9bc056bf4fb"}, - {file = "pandas-2.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9bd8a40f47080825af4317d0340c656744f2bfdb6819f818e6ba3cd24c0e1397"}, - {file = "pandas-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:df0c37ebd19e11d089ceba66eba59a168242fc6b7155cba4ffffa6eccdfb8f16"}, - {file = "pandas-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:739cc70eaf17d57608639e74d63387b0d8594ce02f69e7a0b046f117974b3019"}, - {file = "pandas-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d3558d263073ed95e46f4650becff0c5e1ffe0fc3a015de3c79283dfbdb3df"}, - {file = "pandas-2.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4aa1d8707812a658debf03824016bf5ea0d516afdea29b7dc14cf687bc4d4ec6"}, - {file = "pandas-2.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:76f27a809cda87e07f192f001d11adc2b930e93a2b0c4a236fde5429527423be"}, - {file = "pandas-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:1ba21b1d5c0e43416218db63037dbe1a01fc101dc6e6024bcad08123e48004ab"}, - {file = "pandas-2.2.1.tar.gz", hash = "sha256:0ab90f87093c13f3e8fa45b48ba9f39181046e8f3317d3aadb2fffbb1b978572"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = 
"pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = 
"pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, ] [package.dependencies] numpy = [ - {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" -tzdata = ">=2022.7" +tzdata = ">=2022.1" [package.extras] -all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql 
(>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] -aws = ["s3fs (>=2022.11.0)"] -clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] -compression = ["zstandard (>=0.19.0)"] -computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] -consortium-standard = ["dataframe-api-compat (>=0.1.7)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] -feather = ["pyarrow (>=10.0.1)"] -fss = ["fsspec (>=2022.11.0)"] -gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] -hdf5 = ["tables (>=3.8.0)"] -html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] -mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] -parquet = ["pyarrow (>=10.0.1)"] -performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] -plot = ["matplotlib (>=3.6.3)"] -postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] -pyarrow = ["pyarrow (>=10.0.1)"] -spss = ["pyreadstat (>=1.2.0)"] -sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] -test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", 
"pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.9.2)"] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] [[package]] name = "patsy" @@ 
-1159,44 +1001,6 @@ pyyaml = ">=5.1" virtualenv = ">=20.10.0" [[package]] -name = "pyproj" -version = "3.6.1" -description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" -category = "main" -optional = false -python-versions = ">=3.9" -files = [ - {file = "pyproj-3.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab7aa4d9ff3c3acf60d4b285ccec134167a948df02347585fdd934ebad8811b4"}, - {file = "pyproj-3.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4bc0472302919e59114aa140fd7213c2370d848a7249d09704f10f5b062031fe"}, - {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5279586013b8d6582e22b6f9e30c49796966770389a9d5b85e25a4223286cd3f"}, - {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fafd1f3eb421694857f254a9bdbacd1eb22fc6c24ca74b136679f376f97d35"}, - {file = "pyproj-3.6.1-cp310-cp310-win32.whl", hash = "sha256:c41e80ddee130450dcb8829af7118f1ab69eaf8169c4bf0ee8d52b72f098dc2f"}, - {file = "pyproj-3.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:db3aedd458e7f7f21d8176f0a1d924f1ae06d725228302b872885a1c34f3119e"}, - {file = "pyproj-3.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ebfbdbd0936e178091309f6cd4fcb4decd9eab12aa513cdd9add89efa3ec2882"}, - {file = "pyproj-3.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:447db19c7efad70ff161e5e46a54ab9cc2399acebb656b6ccf63e4bc4a04b97a"}, - {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7e13c40183884ec7f94eb8e0f622f08f1d5716150b8d7a134de48c6110fee85"}, - {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65ad699e0c830e2b8565afe42bd58cc972b47d829b2e0e48ad9638386d994915"}, - {file = "pyproj-3.6.1-cp311-cp311-win32.whl", hash = "sha256:8b8acc31fb8702c54625f4d5a2a6543557bec3c28a0ef638778b7ab1d1772132"}, - {file = "pyproj-3.6.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:38a3361941eb72b82bd9a18f60c78b0df8408416f9340521df442cebfc4306e2"}, - {file = "pyproj-3.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1e9fbaf920f0f9b4ee62aab832be3ae3968f33f24e2e3f7fbb8c6728ef1d9746"}, - {file = "pyproj-3.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d227a865356f225591b6732430b1d1781e946893789a609bb34f59d09b8b0f8"}, - {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83039e5ae04e5afc974f7d25ee0870a80a6bd6b7957c3aca5613ccbe0d3e72bf"}, - {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb059ba3bced6f6725961ba758649261d85ed6ce670d3e3b0a26e81cf1aa8d"}, - {file = "pyproj-3.6.1-cp312-cp312-win32.whl", hash = "sha256:2d6ff73cc6dbbce3766b6c0bce70ce070193105d8de17aa2470009463682a8eb"}, - {file = "pyproj-3.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:7a27151ddad8e1439ba70c9b4b2b617b290c39395fa9ddb7411ebb0eb86d6fb0"}, - {file = "pyproj-3.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ba1f9b03d04d8cab24d6375609070580a26ce76eaed54631f03bab00a9c737b"}, - {file = "pyproj-3.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18faa54a3ca475bfe6255156f2f2874e9a1c8917b0004eee9f664b86ccc513d3"}, - {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd43bd9a9b9239805f406fd82ba6b106bf4838d9ef37c167d3ed70383943ade1"}, - {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50100b2726a3ca946906cbaa789dd0749f213abf0cbb877e6de72ca7aa50e1ae"}, - {file = "pyproj-3.6.1-cp39-cp39-win32.whl", hash = "sha256:9274880263256f6292ff644ca92c46d96aa7e57a75c6df3f11d636ce845a1877"}, - {file = "pyproj-3.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:36b64c2cb6ea1cc091f329c5bd34f9c01bb5da8c8e4492c709bda6a09f96808f"}, - {file = "pyproj-3.6.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd93c1a0c6c4aedc77c0fe275a9f2aba4d59b8acf88cebfc19fe3c430cfabf4f"}, - 
{file = "pyproj-3.6.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6420ea8e7d2a88cb148b124429fba8cd2e0fae700a2d96eab7083c0928a85110"}, - {file = "pyproj-3.6.1.tar.gz", hash = "sha256:44aa7c704c2b7d8fb3d483bbf75af6cb2350d30a63b144279a09b75fead501bf"}, -] - -[package.dependencies] -certifi = "*" name = "protobuf" version = "4.25.3" description = "" @@ -1298,6 +1102,45 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pyproj" +version = "3.6.1" +description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pyproj-3.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab7aa4d9ff3c3acf60d4b285ccec134167a948df02347585fdd934ebad8811b4"}, + {file = "pyproj-3.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4bc0472302919e59114aa140fd7213c2370d848a7249d09704f10f5b062031fe"}, + {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5279586013b8d6582e22b6f9e30c49796966770389a9d5b85e25a4223286cd3f"}, + {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fafd1f3eb421694857f254a9bdbacd1eb22fc6c24ca74b136679f376f97d35"}, + {file = "pyproj-3.6.1-cp310-cp310-win32.whl", hash = "sha256:c41e80ddee130450dcb8829af7118f1ab69eaf8169c4bf0ee8d52b72f098dc2f"}, + {file = "pyproj-3.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:db3aedd458e7f7f21d8176f0a1d924f1ae06d725228302b872885a1c34f3119e"}, + {file = "pyproj-3.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ebfbdbd0936e178091309f6cd4fcb4decd9eab12aa513cdd9add89efa3ec2882"}, + {file = "pyproj-3.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:447db19c7efad70ff161e5e46a54ab9cc2399acebb656b6ccf63e4bc4a04b97a"}, + {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e7e13c40183884ec7f94eb8e0f622f08f1d5716150b8d7a134de48c6110fee85"}, + {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65ad699e0c830e2b8565afe42bd58cc972b47d829b2e0e48ad9638386d994915"}, + {file = "pyproj-3.6.1-cp311-cp311-win32.whl", hash = "sha256:8b8acc31fb8702c54625f4d5a2a6543557bec3c28a0ef638778b7ab1d1772132"}, + {file = "pyproj-3.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:38a3361941eb72b82bd9a18f60c78b0df8408416f9340521df442cebfc4306e2"}, + {file = "pyproj-3.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1e9fbaf920f0f9b4ee62aab832be3ae3968f33f24e2e3f7fbb8c6728ef1d9746"}, + {file = "pyproj-3.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d227a865356f225591b6732430b1d1781e946893789a609bb34f59d09b8b0f8"}, + {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83039e5ae04e5afc974f7d25ee0870a80a6bd6b7957c3aca5613ccbe0d3e72bf"}, + {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb059ba3bced6f6725961ba758649261d85ed6ce670d3e3b0a26e81cf1aa8d"}, + {file = "pyproj-3.6.1-cp312-cp312-win32.whl", hash = "sha256:2d6ff73cc6dbbce3766b6c0bce70ce070193105d8de17aa2470009463682a8eb"}, + {file = "pyproj-3.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:7a27151ddad8e1439ba70c9b4b2b617b290c39395fa9ddb7411ebb0eb86d6fb0"}, + {file = "pyproj-3.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ba1f9b03d04d8cab24d6375609070580a26ce76eaed54631f03bab00a9c737b"}, + {file = "pyproj-3.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18faa54a3ca475bfe6255156f2f2874e9a1c8917b0004eee9f664b86ccc513d3"}, + {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd43bd9a9b9239805f406fd82ba6b106bf4838d9ef37c167d3ed70383943ade1"}, + {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:50100b2726a3ca946906cbaa789dd0749f213abf0cbb877e6de72ca7aa50e1ae"}, + {file = "pyproj-3.6.1-cp39-cp39-win32.whl", hash = "sha256:9274880263256f6292ff644ca92c46d96aa7e57a75c6df3f11d636ce845a1877"}, + {file = "pyproj-3.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:36b64c2cb6ea1cc091f329c5bd34f9c01bb5da8c8e4492c709bda6a09f96808f"}, + {file = "pyproj-3.6.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd93c1a0c6c4aedc77c0fe275a9f2aba4d59b8acf88cebfc19fe3c430cfabf4f"}, + {file = "pyproj-3.6.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6420ea8e7d2a88cb148b124429fba8cd2e0fae700a2d96eab7083c0928a85110"}, + {file = "pyproj-3.6.1.tar.gz", hash = "sha256:44aa7c704c2b7d8fb3d483bbf75af6cb2350d30a63b144279a09b75fead501bf"}, +] + +[package.dependencies] +certifi = "*" + [[package]] name = "pyproject-api" version = "1.6.1" @@ -1460,20 +1303,6 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] -[[package]] -name = "retrying" -version = "1.3.4" -description = "Retrying" -optional = false -python-versions = "*" -files = [ - {file = "retrying-1.3.4-py3-none-any.whl", hash = "sha256:8cc4d43cb8e1125e0ff3344e9de678fefd85db3b750b81b2240dc0183af37b35"}, - {file = "retrying-1.3.4.tar.gz", hash = "sha256:345da8c5765bd982b1d1915deb9102fd3d1f7ad16bd84a9700b85f64d24e8f3e"}, -] - -[package.dependencies] -six = ">=1.7.0" - [[package]] name = "rich" version = "13.7.1" @@ -1662,7 +1491,6 @@ testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jar name = "shapely" version = "2.0.3" description = "Manipulation and analysis of geometric objects" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1713,7 +1541,7 @@ files = [ numpy = ">=1.14,<2" [package.extras] -docs = ["matplotlib", "numpydoc (>=1.1.0,<1.2.0)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", 
"sphinx-book-theme", "sphinx-remove-toctrees"] test = ["pytest", "pytest-cov"] [[package]] @@ -1778,8 +1606,8 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.22.3,<2", markers = "python_version == \"3.10\" and platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""}, {version = ">=1.18,<2", markers = "python_version != \"3.10\" or platform_system != \"Windows\" or platform_python_implementation == \"PyPy\""}, + {version = ">=1.22.3,<2", markers = "python_version == \"3.10\" and platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""}, ] packaging = ">=21.3" pandas = ">=1.0,<2.1.0 || >2.1.0" @@ -1826,20 +1654,6 @@ watchdog = {version = ">=2.1.5", markers = "platform_system != \"Darwin\""} [package.extras] snowflake = ["snowflake-connector-python (>=2.8.0)", "snowflake-snowpark-python (>=0.9.0)"] -[[package]] -name = "streamlit-dynamic-filters" -version = "0.1.6" -description = "Dynamic multiselect filters for Streamlit" -optional = false -python-versions = "*" -files = [ - {file = "streamlit_dynamic_filters-0.1.6-py3-none-any.whl", hash = "sha256:882f213dd3b846704a894c8e31271f0401775334f979a9e4e492a85035179d56"}, - {file = "streamlit_dynamic_filters-0.1.6.tar.gz", hash = "sha256:3d4f53007bf281c846477a2d9f202e61bb97c19c5c43d3dadab75019133c28f2"}, -] - -[package.dependencies] -streamlit = "*" - [[package]] name = "tenacity" version = "8.2.3" @@ -2034,58 +1848,18 @@ files = [ [package.extras] watchmedo = ["PyYAML (>=3.10)"] -[[package]] -name = "werkzeug" -version = "3.0.1" -description = "The comprehensive WSGI web application library." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"}, - {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"}, -] - -[package.dependencies] -MarkupSafe = ">=2.1.1" - -[package.extras] -watchdog = ["watchdog (>=2.3)"] - [[package]] name = "xyzservices" version = "2024.4.0" description = "Source of XYZ tiles providers" -category = "main" optional = false python-versions = ">=3.8" files = [ {file = "xyzservices-2024.4.0-py3-none-any.whl", hash = "sha256:b83e48c5b776c9969fffcfff57b03d02b1b1cd6607a9d9c4e7f568b01ef47f4c"}, {file = "xyzservices-2024.4.0.tar.gz", hash = "sha256:6a04f11487a6fb77d92a98984cd107fbd9157fd5e65f929add9c3d6e604ee88c"}, -version = "2023.10.1" -description = "Source of XYZ tiles providers" -optional = false -python-versions = ">=3.8" -files = [ - {file = "xyzservices-2023.10.1-py3-none-any.whl", hash = "sha256:6a4c38d3a9f89d3e77153eff9414b36a8ee0850c9e8b85796fd1b2a85b8dfd68"}, - {file = "xyzservices-2023.10.1.tar.gz", hash = "sha256:091229269043bc8258042edbedad4fcb44684b0473ede027b5672ad40dc9fa02"}, -] - -[[package]] -name = "zipp" -version = "3.18.1" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"}, - {file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"}, ] -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", 
"pytest-ruff (>=0.2.1)"] - [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "d12b7177519b2078c58a3688cb984d42a5e65a8d6ff60a9ad83343b43641a566" \ No newline at end of file +content-hash = "658c67e7dc4fb5c0f32fee6963b86666d2602730dfd998cb3aa7cd85de85dd44" diff --git a/pyproject.toml b/pyproject.toml index dfbe1dd..fa0318f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,8 +16,8 @@ python = "^3.10" # pandas = "^1.1.1" # jupyter = "^1.0.0" # ipykernel = "^5.3.4" -pandas = "^2.2.1" -duckdb = "^0.10.1" +pandas = "2.0.3" +duckdb = "0.10.0" geopandas = "^0.14.3" folium = "^0.16.0" streamlit = "^1.32.2" From 8cc68806d05ead88b01b2dd14487eb1ba529c61d Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:40:01 +0200 Subject: [PATCH 138/147] [tg] black reformat to hotspots.py --- dashboards/app/pages/hotspots.py | 100 +++++++++++++++++++------------ 1 file changed, 61 insertions(+), 39 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 6c4ae3f..95eac35 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -432,25 +432,30 @@ def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicato def couleur_milieu(type): return couleur.get(type, "white") # Returns 'white' if the type is not found + def update_lieu_options(selected_milieu): if selected_milieu and selected_milieu != "Sélectionnez un milieu...": - filtered_data = data_zds[data_zds['TYPE_MILIEU'] == selected_milieu] - return ["Sélectionnez un lieu..."] + list(filtered_data['TYPE_LIEU2'].dropna().unique()) + filtered_data = data_zds[data_zds["TYPE_MILIEU"] == selected_milieu] + return ["Sélectionnez un lieu..."] + list( + filtered_data["TYPE_LIEU2"].dropna().unique() + ) return ["Sélectionnez un lieu..."] + @st.cache_data def process_data(data_zds): # Filtering data to ensure surface area is not zero - data_zds = 
data_zds[data_zds['SURFACE'] > 0] + data_zds = data_zds[data_zds["SURFACE"] > 0] # Calculating density and filtering out anomalous values - data_zds['DENSITE'] = data_zds['VOLUME_TOTAL'] / data_zds['SURFACE'] - data_zds = data_zds[data_zds['DENSITE'] < 20] + data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] + data_zds = data_zds[data_zds["DENSITE"] < 20] # Rounding values for better display - data_zds['DENSITE'] = data_zds['DENSITE'].round(4) - data_zds['SURFACE_ROND'] = data_zds['SURFACE'].round(2) + data_zds["DENSITE"] = data_zds["DENSITE"].round(4) + data_zds["SURFACE_ROND"] = data_zds["SURFACE"].round(2) return data_zds -#Zoom from admin level + +# Zoom from admin level if NIVEAU_ADMIN == "Commune": zoom_admin = 12 elif NIVEAU_ADMIN == "EPCI": @@ -469,14 +474,16 @@ def plot_density_map(data_zds: pd.DataFrame, filtered_data: pd.DataFrame) -> fol else: # Use processed data - processed_data = process_data(filtered_data if not filtered_data.empty else data_zds) + processed_data = process_data( + filtered_data if not filtered_data.empty else data_zds + ) m = folium.Map( location=[ - processed_data['LIEU_COORD_GPS_Y'].mean(), - processed_data['LIEU_COORD_GPS_X'].mean() + processed_data["LIEU_COORD_GPS_Y"].mean(), + processed_data["LIEU_COORD_GPS_X"].mean(), ], - zoom_start=zoom_admin + zoom_start=zoom_admin, ) # Loop over each row in the DataFrame to place markers @@ -491,26 +498,25 @@ def plot_density_map(data_zds: pd.DataFrame, filtered_data: pd.DataFrame) -> fol """ lgd_txt = '{txt}' - color = couleur_milieu(row['TYPE_MILIEU']) + color = couleur_milieu(row["TYPE_MILIEU"]) folium.CircleMarker( - fg = folium.FeatureGroup(name= lgd_txt.format( txt= ['TYPE_MILIEU'], col= color)), - location=[row['LIEU_COORD_GPS_Y'], row['LIEU_COORD_GPS_X']], - radius=np.log(row['DENSITE'] + 1)*15, + fg=folium.FeatureGroup( + name=lgd_txt.format(txt=["TYPE_MILIEU"], col=color) + ), + location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], + 
radius=np.log(row["DENSITE"] + 1) * 15, popup=folium.Popup(popup_html, max_width=300), color=color, fill=True, - ).add_to(m) folium_static(m) return m + # Function for 'milieu' density table -def density_table_milieu( - data_zds: pd.DataFrame, - filtered_data: pd.DataFrame -): +def density_table_milieu(data_zds: pd.DataFrame, filtered_data: pd.DataFrame): if data_zds.empty: st.write("Aucune donnée disponible pour la région sélectionnée.") @@ -554,10 +560,7 @@ def density_table_milieu( ) -def density_table_lieu( - data_zds: pd.DataFrame, - filtered_data: pd.DataFrame -): +def density_table_lieu(data_zds: pd.DataFrame, filtered_data: pd.DataFrame): if data_zds.empty: st.write("Aucune donnée disponible pour la région sélectionnée.") @@ -766,55 +769,74 @@ def create_contributors_table(data_zds: pd.DataFrame, multi_filter_dict: dict) - # Add a default "Select a milieu..." option selected_milieu = st.selectbox( "Sélectionnez un milieu:", - ["Sélectionnez un milieu..."] + list(pd.unique(data_zds_correct['TYPE_MILIEU'])) + ["Sélectionnez un milieu..."] + + list(pd.unique(data_zds_correct["TYPE_MILIEU"])), ) with right_column: # Update lieu options based on selected milieu lieu_options = update_lieu_options(selected_milieu) selected_lieu = st.selectbox("Sélectionnez un lieu:", lieu_options) - # Place the map centrally by using a wider column for the map and narrower ones on the sides col1, map_col, col3 = st.columns([4, 10, 1]) # Adjust column ratios as needed with map_col: st.markdown("### Carte des Densités") - if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": - filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + if ( + selected_milieu != "Sélectionnez un milieu..." + and selected_lieu != "Sélectionnez un lieu..." 
+ ): + filtered_data = data_zds_correct[ + (data_zds_correct["TYPE_MILIEU"] == selected_milieu) + & (data_zds_correct["TYPE_LIEU2"] == selected_lieu) + ] plot_density_map(data_zds_correct, filtered_data) else: - plot_density_map(data_zds_correct, data_zds_correct) # Show all data by default - + plot_density_map( + data_zds_correct, data_zds_correct + ) # Show all data by default col1, col2, col3 = st.columns([3, 3, 2]) with col1: st.markdown("#### Tableau des Densités par Milieu") - if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": - filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + if ( + selected_milieu != "Sélectionnez un milieu..." + and selected_lieu != "Sélectionnez un lieu..." + ): + filtered_data = data_zds_correct[ + (data_zds_correct["TYPE_MILIEU"] == selected_milieu) + & (data_zds_correct["TYPE_LIEU2"] == selected_lieu) + ] density_table_milieu(data_zds_correct, filtered_data) else: density_table_milieu(data_zds_correct, data_zds_correct) with col2: st.markdown("#### Tableau des Densités par Lieu") - if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": - filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + if ( + selected_milieu != "Sélectionnez un milieu..." + and selected_lieu != "Sélectionnez un lieu..." 
+ ): + filtered_data = data_zds_correct[ + (data_zds_correct["TYPE_MILIEU"] == selected_milieu) + & (data_zds_correct["TYPE_LIEU2"] == selected_lieu) + ] density_table_lieu(data_zds_correct, filtered_data) else: density_table_lieu(data_zds_correct, data_zds_correct) with col3: - with st.expander("###### Notice ℹ️", expanded=True): - st.write( - """ + with st.expander("###### Notice ℹ️", expanded=True): + st.write( + """ **Milieu** désigne de grands types d'environnements comme le Littoral, les Cours d'eau ou la Montagne.\n Chaque Milieu est ensuite divisé en **Lieux** plus spécifiques. Par exemple, sous le Milieu Littoral, on trouve des Lieux comme les Plages, les Roches, les Digues, ou les Parkings. """ - ) + ) with tab2: # Use the selected filters From bf87a1460545f6aac3e69fd92660ce04683cd016 Mon Sep 17 00:00:00 2001 From: Mendi33 Date: Thu, 20 Jun 2024 17:01:19 +0000 Subject: [PATCH 139/147] =?UTF-8?q?Am=C3=A9lioration=20de=20l'onglet=20Act?= =?UTF-8?q?ions=20suite=20aux=20retours=20de=20MerTerre=20:=20-=20Wording?= =?UTF-8?q?=20-=20modification=20du=20popup=20de=20la=20map=20sur=20l'ongl?= =?UTF-8?q?et=20'ramassages=20r=C3=A9alis=C3=A9s'=20-=20modification=20des?= =?UTF-8?q?=20tooltips=20des=20graphiques=20sur=20l'onglet=20'ramassages?= =?UTF-8?q?=20r=C3=A9alis=C3=A9s'=20-=20d=C3=A9placement=20dans=20home.py?= =?UTF-8?q?=20de=20la=20fonction=20load=5Fdf=5Fevents=5Fclean()=20pour=20c?= =?UTF-8?q?harger=20les=20evenements=20=C3=A0=20venir=20-=20suppression=20?= =?UTF-8?q?de=20la=20police=20Montserrat=20-=20conflit=20pyproject.toml=20?= =?UTF-8?q?OK?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 12 ++- dashboards/app/pages/actions.py | 148 +++++++++++++++++++++++++------- pyproject.toml | 1 - 3 files changed, 128 insertions(+), 33 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index e43f5a0..3e42583 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py 
@@ -104,10 +104,19 @@ def load_df_nb_dechet() -> pd.DataFrame: "sation/data/data_releve_nb_dechet.csv", ) - # Appel des fonctions pour charger les données + @st.cache_data + # Définition d'une fonction pour charger les evenements à venir + def load_df_events_clean() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/export_events_cleaned.csv", + ) + # Appel des fonctions pour charger les données df_other = load_df_other() df_structures = load_structures() + df_events = load_df_events_clean() # Création du filtre par niveau géographique : correspondance labels et variables df_nb_dechets = load_df_nb_dechet() @@ -187,6 +196,7 @@ def load_df_nb_dechet() -> pd.DataFrame: ] st.session_state["structures_filtre"] = df_structures_filtre st.session_state["structures"] = df_structures + st.session_state["events"] = df_events # Filtrer et enregistrer le dataframe nb_dechets dans session.State # Récuperer la liste des relevés diff --git a/dashboards/app/pages/actions.py b/dashboards/app/pages/actions.py index fc3feb4..5183159 100644 --- a/dashboards/app/pages/actions.py +++ b/dashboards/app/pages/actions.py @@ -30,19 +30,10 @@ else: st.write(f"Votre territoire : {filtre_niveau} {filtre_collectivite}") - # Définition d'une fonction pour charger les evenements à venir - def load_df_events_clean() -> pd.DataFrame: - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/export_events_cleaned.csv" - ) - - # Appel des fonctions pour charger les données - df_events = load_df_events_clean() - # Appeler les dataframes volumes et nb_dechets filtré depuis le session state - if "df_other_filtre" not in st.session_state: + if ("df_other_filtre" not in st.session_state) or ( + "events" not in st.session_state + ): st.write( """ ### :warning: 
Merci de sélectionner une collectivité\ @@ -52,6 +43,7 @@ def load_df_events_clean() -> pd.DataFrame: st.stop() else: df_other = st.session_state["df_other_filtre"].copy() + df_events = st.session_state["events"].copy() # 2 Onglets : Evènements, Evènements à venir tab1, tab2 = st.tabs( @@ -62,7 +54,7 @@ def load_df_events_clean() -> pd.DataFrame: ) # Locale du package Babel - locale = Locale("fr", "FR") + bbl_locale = Locale("fr", "FR") # Onglet 1 : Evènements with tab1: @@ -119,7 +111,7 @@ def load_df_events_clean() -> pd.DataFrame: format_date( datetime(2022, mois_dict[mois_liste[x - 1]], 1), format="MMMM", - locale=locale, + locale=bbl_locale, ) ), index=0, @@ -141,8 +133,6 @@ def load_df_events_clean() -> pd.DataFrame: df_ramassages = df_other_filtre.copy() # Calcul des indicateurs clés de haut de tableau avant transformation - volume_total = df_ramassages["VOLUME_TOTAL"].sum() - poids_total = df_ramassages["POIDS_TOTAL"].sum() nombre_participants = df_ramassages["NB_PARTICIPANTS"].sum() nb_collectes = len(df_ramassages) nombre_structures = df_ramassages["ID_STRUCTURE"].nunique() @@ -201,10 +191,40 @@ def load_df_events_clean() -> pd.DataFrame: # Application d'une limite minimale pour le rayon si nécessaire radius = max(radius, 5) + format_participants = "{:.0f}".format(row.NB_PARTICIPANTS) + + html = f""" +
+

+

+ Evénement +
+
+ {row.NOM_EVENEMENT} +
+
+ Structure +
+
+ {row.NOM_STRUCTURE} +
+
+
+ Date : {row.DATE.strftime("%d/%m/%Y")} +
+
+ Nombre de participants : {format_participants} +
+

+
+ """ + + popup = folium.Popup(html, max_width=300) + folium.CircleMarker( location=(row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]), radius=radius, # Utilisation du rayon ajusté - popup=f"{row['NOM_ZONE']}, {row['LIEU_VILLE']}, {row['NOM_EVENEMENT']}, {row['DATE']} : nombre de participants : {row['NB_PARTICIPANTS']}", + popup=popup, color="#3186cc", fill=True, fill_color="#3186cc", @@ -235,8 +255,14 @@ def load_df_events_clean() -> pd.DataFrame: color_discrete_sequence=colors, category_orders={"NIVEAU_CARAC": [0, 1, 2, 3, 4]}, ) + fig1_actions.update_traces( - textposition="inside", texttemplate="%{label}
%{percent:.1%}" + textfont_size=12, + textfont_color="white", + textposition="inside", + textinfo="percent+label", + texttemplate="%{label}
%{percent:.1%}", + hovertemplate="Niveau %{label}
%{value} ramassages", ) # préparation du dataframe et figure releves types de déchets @@ -248,14 +274,29 @@ def load_df_events_clean() -> pd.DataFrame: df_type_dechet_counts_sorted = df_type_dechet_counts.sort_values( by="counts", ascending=False ) + fig2_actions = px.bar( df_type_dechet_counts_sorted, y="counts", x="TYPE_DECHET", - title="Nombre de relevés par types de déchets", + title="Nombre de relevés par type de déchets", text="counts", ) - fig2_actions.update_layout(xaxis_title="", yaxis_title="") + + fig2_actions.update_layout( + uniformtext_minsize=8, + yaxis_title=None, + xaxis_title=None, + separators=", ", # Formatte les nombres en français (séparateur décimale, séparateur milliers) + ) + + fig2_actions.update_traces( + texttemplate="%{text:,.0f}", + textfont_size=12, + hovertemplate="Type de déchets : %{label}
%{y} ramassages", + ) + + fig2_actions.update_yaxes(tickfont=dict(size=12)) l3_col1, l3_col2 = st.columns(2) cell4 = l3_col1.container(border=True) @@ -288,11 +329,25 @@ def load_df_events_clean() -> pd.DataFrame: df_milieux_counts_sorted, y="TYPE_MILIEU", x="counts", - title="Nombre de relevés par types de milieux", + title="Nombre de relevés par type de milieux", text="counts", orientation="h", ) - fig3_actions.update_layout(xaxis_title="", yaxis_title="") + + fig3_actions.update_layout( + uniformtext_minsize=8, + yaxis_title=None, + xaxis_title=None, + separators=", ", # Formatte les nombres en français (séparateur décimale, séparateur milliers) + ) + + fig3_actions.update_traces( + texttemplate="%{text:,.0f}", + textfont_size=12, + hovertemplate="Type de milieux : %{label}
%{x} ramassages", + ) + + fig3_actions.update_yaxes(tickfont=dict(size=12)) # préparation du dataframe et figure releves types de lieux 2 df_type_lieu2 = df_other_filtre.copy() @@ -313,11 +368,25 @@ def load_df_events_clean() -> pd.DataFrame: df_type_lieu2_counts_sorted, y="counts", x="TYPE_LIEU2", - title="Nombre de relevés par types de lieu", + title="Nombre de relevés par type de lieu", text="counts", ) - fig4_actions.update_layout(xaxis_title="", yaxis_title="") + + fig4_actions.update_layout( + uniformtext_minsize=8, + yaxis_title=None, + xaxis_title=None, + separators=", ", # Formatte les nombres en français (séparateur décimale, séparateur milliers) + ) + + fig4_actions.update_traces( + texttemplate="%{text:,.0f}", + textfont_size=12, + hovertemplate="Type de lieu : %{label}
%{y} ramassages", + ) + fig4_actions.update_xaxes(tickangle=45) + fig4_actions.update_yaxes(tickfont=dict(size=12)) l4_col1, l4_col2 = st.columns(2) cell6 = l4_col1.container(border=True) @@ -333,7 +402,7 @@ def load_df_events_clean() -> pd.DataFrame: # préparation du dataframe et figure volume + nb collectes volume + nb collectes par mois # Créer une liste ordonnée des noms de mois dans l'ordre souhaité mois_ordre = [ - str.capitalize(format_date(dt, format="MMMM", locale=locale)) + str.capitalize(format_date(dt, format="MMMM", locale=bbl_locale)) for dt in pd.date_range(start="2022-01-01", end="2022-12-01", freq="MS") ] @@ -350,7 +419,21 @@ def load_df_events_clean() -> pd.DataFrame: title="Nombre de relevés par mois", text="counts", ) - fig5_actions.update_layout(xaxis_title="", yaxis_title="") + + fig5_actions.update_layout( + uniformtext_minsize=8, + yaxis_title=None, + xaxis_title=None, + separators=", ", # Formatte les nombres en français (séparateur décimale, séparateur milliers) + ) + + fig5_actions.update_traces( + texttemplate="%{text:,.0f}", + textfont_size=12, + hovertemplate="Mois : %{label}
%{y} ramassages", + ) + + fig5_actions.update_yaxes(tickfont=dict(size=12)) # Utiliser la liste mois_ordre comme étiquettes sur l'axe x fig5_actions.update_xaxes(tickvals=list(range(1, 13)), ticktext=mois_ordre) @@ -404,12 +487,12 @@ def load_df_events_clean() -> pd.DataFrame:
{row.TYPE_EVENEMENT}
-
+
{row.NOM_EVENEMENT}

- {str.capitalize(format_date(row.DATE, format="full", locale=locale))} + {str.capitalize(format_date(row.DATE, format="full", locale=bbl_locale))}

@@ -419,8 +502,11 @@ def load_df_events_clean() -> pd.DataFrame:

""" - iframe = folium.IFrame(html=html, width=300, height=120) - popup = folium.Popup(iframe, parse_html=True, max_width=300) + # Adapte la hauteur du popup par iFrame + iframe_height = 140 if event_envg else 120 + + iframe = folium.IFrame(html=html, width=300, height=iframe_height) + popup = folium.Popup(iframe, parse_html=True) folium.Marker( location=[row.COORD_GPS_Y, row.COORD_GPS_X], @@ -451,7 +537,7 @@ def load_df_events_clean() -> pd.DataFrame: for idx, row in df_events_a_venir.iterrows(): with st.container(border=True): # Bloc contenant la date - date_block = f"
{row.DATE.day}
{str.capitalize(locale.months['format']['wide'][row.DATE.month - 1])}
" + date_block = f"
{row.DATE.day}
{str.capitalize(bbl_locale.months['format']['abbreviated'][row.DATE.month])}
" # Bloc contenant le nom de l'événement event_block = ( f"
{row.NOM_EVENEMENT}
" diff --git a/pyproject.toml b/pyproject.toml index 7d1d9cb..f5d7dad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,6 @@ dash = "^2.16.1" duckdb = "^0.10.1" geopandas = "^0.14.3" folium = "^0.16.0" -folium = "^0.16.0" streamlit = "^1.32.2" plotly-express = "^0.4.1" streamlit-dynamic-filters = "^0.1.6" From 452556376874bd82bb2cce9282cbd21c50cb81b2 Mon Sep 17 00:00:00 2001 From: Mendi33 Date: Thu, 20 Jun 2024 18:43:03 +0000 Subject: [PATCH 140/147] update poetry.lock to solve precommit conflicts --- .gitignore | 2 +- dashboards/app/requirements.txt | 1 - poetry.lock | 1078 ++----------------------------- pyproject.toml | 4 +- 4 files changed, 56 insertions(+), 1029 deletions(-) diff --git a/.gitignore b/.gitignore index a21d144..ae45a6f 100644 --- a/.gitignore +++ b/.gitignore @@ -102,7 +102,7 @@ ipython_config.py # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock +poetry.lock # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. diff --git a/dashboards/app/requirements.txt b/dashboards/app/requirements.txt index 54ac0f9..b1aa314 100644 --- a/dashboards/app/requirements.txt +++ b/dashboards/app/requirements.txt @@ -7,7 +7,6 @@ streamlit==1.32.2 openpyxl==3.1.2 streamlit-folium==0.19.1 plotly==5.19.0 -streamlit-dynamic-filters==0.1.6 streamlit-authenticator==0.3.2 st-pages==0.4.5 babel==2.11.0 diff --git a/poetry.lock b/poetry.lock index 254c136..9bcff55 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,73 +1,10 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. - -[[package]] -name = "altair" -version = "5.3.0" -description = "Vega-Altair: A declarative statistical visualization library for Python." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "altair-5.3.0-py3-none-any.whl", hash = "sha256:7084a1dab4d83c5e7e5246b92dc1b4451a6c68fd057f3716ee9d315c8980e59a"}, - {file = "altair-5.3.0.tar.gz", hash = "sha256:5a268b1a0983b23d8f9129f819f956174aa7aea2719ed55a52eba9979b9f6675"}, -] - -[package.dependencies] -jinja2 = "*" -jsonschema = ">=3.0" -numpy = "*" -packaging = "*" -pandas = ">=0.25" -toolz = "*" -typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} - -[package.extras] -all = ["altair-tiles (>=0.3.0)", "anywidget (>=0.9.0)", "pyarrow (>=11)", "vega-datasets (>=0.9.0)", "vegafusion[embed] (>=1.6.6)", "vl-convert-python (>=1.3.0)"] -dev = ["geopandas", "hatch", "ipython", "m2r", "mypy", "pandas-stubs", "pytest", "pytest-cov", "ruff (>=0.3.0)", "types-jsonschema", "types-setuptools"] -doc = ["docutils", "jinja2", "myst-parser", "numpydoc", "pillow (>=9,<10)", "pydata-sphinx-theme (>=0.14.1)", "scipy", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinxext-altair"] - -[[package]] -name = "attrs" -version = "23.2.0" -description = "Classes Without Boilerplate" -optional = false -python-versions = ">=3.7" -files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, -] - -[package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] - -[[package]] -name = "attrs" -version = "23.2.0" -description = "Classes Without Boilerplate" -category = 
"main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, -] - -[package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. [[package]] name = "blinker" version = "1.7.0" description = "Fast, simple object-to-object and broadcast signaling" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -75,25 +12,11 @@ files = [ {file = "blinker-1.7.0.tar.gz", hash = "sha256:e6820ff6fa4e4d1d8e2747c2283749c3f547e4fee112b98555cdcdae32996182"}, ] -[[package]] -name = "branca" -version = "0.7.1" -description = "Generate complex HTML+JS pages with Python" -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "branca-0.7.1-py3-none-any.whl", hash = "sha256:70515944ed2d1ed2784c552508df58037ca19402a8a1069d57f9113e3e012f51"}, - {file = "branca-0.7.1.tar.gz", hash = "sha256:e6b6f37a37bc0abffd960c68c045a7fe025d628eff87fedf6ab6ca814812110c"}, -] - -[package.dependencies] -jinja2 = ">=3" - [[package]] name = "cachetools" version = "5.3.2" description = "Extensible memoizing collections and decorators" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -105,6 +28,7 @@ files = [ name = "certifi" version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." 
+category = "main" optional = false python-versions = ">=3.6" files = [ @@ -116,6 +40,7 @@ files = [ name = "cfgv" version = "3.4.0" description = "Validate configuration and produce human readable error messages." +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -127,6 +52,7 @@ files = [ name = "chardet" version = "5.2.0" description = "Universal encoding detector for Python 3" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -138,6 +64,7 @@ files = [ name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -237,6 +164,7 @@ files = [ name = "click" version = "8.1.7" description = "Composable command line interface toolkit" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -247,46 +175,11 @@ files = [ [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} -[[package]] -name = "click-plugins" -version = "1.1.1" -description = "An extension module for click to enable registering CLI commands via setuptools entry-points." 
-category = "main" -optional = false -python-versions = "*" -files = [ - {file = "click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b"}, - {file = "click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"}, -] - -[package.dependencies] -click = ">=4.0" - -[package.extras] -dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] - -[[package]] -name = "cligj" -version = "0.7.2" -description = "Click params for commmand line interfaces to GeoJSON" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4" -files = [ - {file = "cligj-0.7.2-py3-none-any.whl", hash = "sha256:c1ca117dbce1fe20a5809dc96f01e1c2840f6dcc939b3ddbb1111bf330ba82df"}, - {file = "cligj-0.7.2.tar.gz", hash = "sha256:a4bc13d623356b373c2c27c53dbd9c68cae5d526270bfa71f6c6fa69669c6b27"}, -] - -[package.dependencies] -click = ">=4.0" - -[package.extras] -test = ["pytest-cov"] - [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." +category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -298,6 +191,7 @@ files = [ name = "dash" version = "2.16.1" description = "A Python framework for building reactive web-apps. Developed by Plotly." 
+category = "main" optional = false python-versions = ">=3.8" files = [ @@ -331,6 +225,7 @@ testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", "dash-testing-stub name = "dash-core-components" version = "2.0.0" description = "Core component suite for Dash" +category = "main" optional = false python-versions = "*" files = [ @@ -342,6 +237,7 @@ files = [ name = "dash-html-components" version = "2.0.0" description = "Vanilla HTML components for Dash" +category = "main" optional = false python-versions = "*" files = [ @@ -353,6 +249,7 @@ files = [ name = "dash-table" version = "5.0.0" description = "Dash table" +category = "main" optional = false python-versions = "*" files = [ @@ -364,6 +261,7 @@ files = [ name = "distlib" version = "0.3.8" description = "Distribution utilities" +category = "dev" optional = false python-versions = "*" files = [ @@ -375,6 +273,7 @@ files = [ name = "duckdb" version = "0.10.1" description = "DuckDB in-process database" +category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -431,6 +330,7 @@ files = [ name = "exceptiongroup" version = "1.2.0" description = "Backport of PEP 654 (exception groups)" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -445,6 +345,7 @@ test = ["pytest (>=6)"] name = "filelock" version = "3.13.1" description = "A platform independent file lock." 
+category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -457,58 +358,11 @@ docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1 testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] -[[package]] -name = "fiona" -version = "1.9.6" -description = "Fiona reads and writes spatial data files" -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "fiona-1.9.6-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:63e528b5ea3d8b1038d788e7c65117835c787ba7fdc94b1b42f09c2cbc0aaff2"}, - {file = "fiona-1.9.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:918bd27d8625416672e834593970f96dff63215108f81efb876fe5c0bc58a3b4"}, - {file = "fiona-1.9.6-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:e313210b30d09ed8f829bf625599e248dadd78622728030221f6526580ff26c5"}, - {file = "fiona-1.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:89095c2d542325ee45894b8837e8048cdbb2f22274934e1be3b673ca628010d7"}, - {file = "fiona-1.9.6-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:98cea6f435843b2119731c6b0470e5b7386aa16b6aa7edabbf1ed93aefe029c3"}, - {file = "fiona-1.9.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f4230eccbd896a79d1ebfa551d84bf90f512f7bcbe1ca61e3f82231321f1a532"}, - {file = "fiona-1.9.6-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:48b6218224e96de5e36b5eb259f37160092260e5de0dcd82ca200b1887aa9884"}, - {file = "fiona-1.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:c1dd5fbc29b7303bb87eb683455e8451e1a53bb8faf20ef97fdcd843c9e4a7f6"}, - {file = "fiona-1.9.6-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:42d8a0e5570948d3821c493b6141866d9a4d7a64edad2be4ecbb89f81904baac"}, - {file = "fiona-1.9.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39819fb8f5ec6d9971cb01b912b4431615a3d3f50c83798565d8ce41917930db"}, - {file = 
"fiona-1.9.6-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:9b53034efdf93ada9295b081e6a8280af7c75496a20df82d4c2ca46d65b85905"}, - {file = "fiona-1.9.6-cp312-cp312-win_amd64.whl", hash = "sha256:1dcd6eca7524535baf2a39d7981b4a46d33ae28c313934a7c3eae62eecf9dfa5"}, - {file = "fiona-1.9.6-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:e5404ed08c711489abcb3a50a184816825b8af06eb73ad2a99e18b8e7b47c96a"}, - {file = "fiona-1.9.6-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:53bedd2989e255df1bf3378ae9c06d6d241ec273c280c544bb44ffffebb97fb0"}, - {file = "fiona-1.9.6-cp37-cp37m-win_amd64.whl", hash = "sha256:77653a08564a44e634c44cd74a068d2f55d1d4029edd16d1c8aadcc4d8cc1d2c"}, - {file = "fiona-1.9.6-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:e7617563b36d2be99f048f0d0054b4d765f4aae454398f88f19de9c2c324b7f8"}, - {file = "fiona-1.9.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:50037c3b7a5f6f434b562b5b1a5b664f1caa7a4383b00af23cdb59bfc6ba852c"}, - {file = "fiona-1.9.6-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:bf51846ad602757bf27876f458c5c9f14b09421fac612f64273cc4e3fcabc441"}, - {file = "fiona-1.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:11af1afc1255642a7787fe112c29d01f968f1053e4d4700fc6f3bb879c1622e0"}, - {file = "fiona-1.9.6-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:52e8fec650b72fc5253d8f86b63859acc687182281c29bfacd3930496cf982d1"}, - {file = "fiona-1.9.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9b92aa1badb2773e7cac19bef3064d73e9d80c67c42f0928db2520a04be6f2f"}, - {file = "fiona-1.9.6-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:0eaffbf3bfae9960484c0c08ea461b0c40e111497f04e9475ebf15ac7a22d9dc"}, - {file = "fiona-1.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:f1b49d51a744874608b689f029766aa1e078dd72e94b44cf8eeef6d7bd2e9051"}, - {file = "fiona-1.9.6.tar.gz", hash = "sha256:791b3494f8b218c06ea56f892bd6ba893dfa23525347761d066fb7738acda3b1"}, -] - -[package.dependencies] -attrs = ">=19.2.0" -certifi = "*" -click = 
">=8.0,<9.0" -click-plugins = ">=1.0" -cligj = ">=0.5" -six = "*" - -[package.extras] -all = ["fiona[calc,s3,test]"] -calc = ["shapely"] -s3 = ["boto3 (>=1.3.1)"] -test = ["fiona[s3]", "pytest (>=7)", "pytest-cov", "pytz"] - [[package]] name = "flask" version = "3.0.2" description = "A simple framework for building complex web applications." +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -527,81 +381,11 @@ Werkzeug = ">=3.0.0" async = ["asgiref (>=3.2)"] dotenv = ["python-dotenv"] -[[package]] -name = "folium" -version = "0.16.0" -description = "Make beautiful maps with Leaflet.js & Python" -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "folium-0.16.0-py2.py3-none-any.whl", hash = "sha256:ba72505db18bef995c880da19457d2b10c931db8059af5f6ccec9310d262b584"}, - {file = "folium-0.16.0.tar.gz", hash = "sha256:2585ee9253dc758d3a365534caa6fb5fa0c244646db4dc5819afc67bbd4daabb"}, -] - -[package.dependencies] -branca = ">=0.6.0" -jinja2 = ">=2.9" -numpy = "*" -requests = "*" -xyzservices = "*" - -[package.extras] -testing = ["pytest"] - -[[package]] -name = "geopandas" -version = "0.14.3" -description = "Geographic pandas extensions" -category = "main" -optional = false -python-versions = ">=3.9" -files = [ - {file = "geopandas-0.14.3-py3-none-any.whl", hash = "sha256:41b31ad39e21bc9e8c4254f78f8dc4ce3d33d144e22e630a00bb336c83160204"}, - {file = "geopandas-0.14.3.tar.gz", hash = "sha256:748af035d4a068a4ae00cab384acb61d387685c833b0022e0729aa45216b23ac"}, -] - -[package.dependencies] -fiona = ">=1.8.21" -packaging = "*" -pandas = ">=1.4.0" -pyproj = ">=3.3.0" -shapely = ">=1.8.0" -name = "gitdb" -version = "4.0.11" -description = "Git Object Database" -optional = false -python-versions = ">=3.7" -files = [ - {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, - {file = "gitdb-4.0.11.tar.gz", hash = 
"sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, -] - -[package.dependencies] -smmap = ">=3.0.1,<6" - -[[package]] -name = "gitpython" -version = "3.1.43" -description = "GitPython is a Python library used to interact with Git repositories" -optional = false -python-versions = ">=3.7" -files = [ - {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, - {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, -] - -[package.dependencies] -gitdb = ">=4.0.1,<5" - -[package.extras] -doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] -test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] - [[package]] name = "identify" version = "2.5.33" description = "File identification library for Python" +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -616,6 +400,7 @@ license = ["ukkonen"] name = "idna" version = "3.6" description = "Internationalized Domain Names in Applications (IDNA)" +category = "main" optional = false python-versions = ">=3.5" files = [ @@ -627,6 +412,7 @@ files = [ name = "importlib-metadata" version = "7.1.0" description = "Read metadata from Python packages" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -646,6 +432,7 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -657,6 +444,7 @@ files = [ name = 
"itsdangerous" version = "2.1.2" description = "Safely pass data to untrusted environments and back." +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -668,6 +456,7 @@ files = [ name = "jinja2" version = "3.1.3" description = "A very fast and expressive template engine." +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -681,69 +470,11 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] -[[package]] -name = "jsonschema" -version = "4.21.1" -description = "An implementation of JSON Schema validation for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"}, - {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"}, -] - -[package.dependencies] -attrs = ">=22.2.0" -jsonschema-specifications = ">=2023.03.6" -referencing = ">=0.28.4" -rpds-py = ">=0.7.1" - -[package.extras] -format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] - -[[package]] -name = "jsonschema-specifications" -version = "2023.12.1" -description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" -optional = false -python-versions = ">=3.8" -files = [ - {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, - {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, -] - -[package.dependencies] -referencing = ">=0.31.0" - -[[package]] -name = "markdown-it-py" -version = "3.0.0" -description = "Python port of 
markdown-it. Markdown parsing, done right!" -optional = false -python-versions = ">=3.8" -files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, -] - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - [[package]] name = "markupsafe" version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." 
+category = "main" optional = false python-versions = ">=3.7" files = [ @@ -809,21 +540,11 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, -] - [[package]] name = "nest-asyncio" version = "1.6.0" description = "Patch asyncio to allow nested event loops" +category = "main" optional = false python-versions = ">=3.5" files = [ @@ -835,6 +556,7 @@ files = [ name = "nodeenv" version = "1.8.0" description = "Node.js virtual environment builder" +category = "dev" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" files = [ @@ -849,6 +571,7 @@ setuptools = "*" name = "numpy" version = "1.26.4" description = "Fundamental package for array computing in Python" +category = "main" optional = false python-versions = ">=3.9" files = [ @@ -894,6 +617,7 @@ files = [ name = "packaging" version = "23.2" description = "Core utilities for Python packages" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -905,6 +629,7 @@ files = [ name = "pandas" version = "2.2.1" description = "Powerful data structures for data analysis, time series, and statistics" +category = "main" optional = false python-versions = ">=3.9" files = [ @@ -974,114 +699,11 @@ sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-d test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.9.2)"] -[[package]] -name = "patsy" -version = "0.5.6" -description = "A Python package for describing statistical models and for 
building design matrices." -optional = false -python-versions = "*" -files = [ - {file = "patsy-0.5.6-py2.py3-none-any.whl", hash = "sha256:19056886fd8fa71863fa32f0eb090267f21fb74be00f19f5c70b2e9d76c883c6"}, - {file = "patsy-0.5.6.tar.gz", hash = "sha256:95c6d47a7222535f84bff7f63d7303f2e297747a598db89cf5c67f0c0c7d2cdb"}, -] - -[package.dependencies] -numpy = ">=1.4" -six = "*" - -[package.extras] -test = ["pytest", "pytest-cov", "scipy"] - -[[package]] -name = "pillow" -version = "10.3.0" -description = "Python Imaging Library (Fork)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, - {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, - {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, - {file = 
"pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, - {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, - {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, - {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, - {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = 
"sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, - {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, - {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, - {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, - {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, - {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = 
"sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, - {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, - {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, - {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = 
"sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, - {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] -fpx = ["olefile"] -mic = ["olefile"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] -typing = ["typing-extensions"] -xmp = ["defusedxml"] - [[package]] name = "platformdirs" version = "4.1.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1097,6 +719,7 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co name = "plotly" version = "5.20.0" description = "An open-source, interactive data visualization library for Python" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1108,29 +731,11 @@ files = [ packaging = "*" tenacity = ">=6.2.0" -[[package]] -name = "plotly-express" -version = "0.4.1" -description = "Plotly Express - a high level wrapper for Plotly.py" -optional = false -python-versions = "*" -files = [ - {file = "plotly_express-0.4.1-py2.py3-none-any.whl", hash = "sha256:5f112922b0a6225dc7c010e3b86295a74449e3eac6cac8faa95175e99b7698ce"}, - {file = "plotly_express-0.4.1.tar.gz", hash = "sha256:ff73a41ce02fb43d1d8e8fa131ef3e6589857349ca216b941b8f3f862bce0278"}, -] - -[package.dependencies] -numpy = ">=1.11" -pandas = ">=0.20.0" -patsy = ">=0.5" -plotly = ">=4.1.0" -scipy = ">=0.18" -statsmodels = ">=0.9.0" - [[package]] name = "pluggy" version = "1.4.0" description = "plugin and hook calling mechanisms for python" +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1146,6 +751,7 @@ testing = ["pytest", "pytest-benchmark"] name = "pre-commit" version = "2.21.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1160,150 +766,11 @@ nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" -[[package]] -name = "pyproj" -version = "3.6.1" -description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" -category = "main" -optional = false -python-versions = ">=3.9" -files = [ - {file = "pyproj-3.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab7aa4d9ff3c3acf60d4b285ccec134167a948df02347585fdd934ebad8811b4"}, - {file = "pyproj-3.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4bc0472302919e59114aa140fd7213c2370d848a7249d09704f10f5b062031fe"}, - {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5279586013b8d6582e22b6f9e30c49796966770389a9d5b85e25a4223286cd3f"}, - {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fafd1f3eb421694857f254a9bdbacd1eb22fc6c24ca74b136679f376f97d35"}, - {file = "pyproj-3.6.1-cp310-cp310-win32.whl", hash = "sha256:c41e80ddee130450dcb8829af7118f1ab69eaf8169c4bf0ee8d52b72f098dc2f"}, - {file = "pyproj-3.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:db3aedd458e7f7f21d8176f0a1d924f1ae06d725228302b872885a1c34f3119e"}, - {file = "pyproj-3.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ebfbdbd0936e178091309f6cd4fcb4decd9eab12aa513cdd9add89efa3ec2882"}, - {file = "pyproj-3.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:447db19c7efad70ff161e5e46a54ab9cc2399acebb656b6ccf63e4bc4a04b97a"}, - {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7e13c40183884ec7f94eb8e0f622f08f1d5716150b8d7a134de48c6110fee85"}, - {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65ad699e0c830e2b8565afe42bd58cc972b47d829b2e0e48ad9638386d994915"}, - {file = "pyproj-3.6.1-cp311-cp311-win32.whl", hash = 
"sha256:8b8acc31fb8702c54625f4d5a2a6543557bec3c28a0ef638778b7ab1d1772132"}, - {file = "pyproj-3.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:38a3361941eb72b82bd9a18f60c78b0df8408416f9340521df442cebfc4306e2"}, - {file = "pyproj-3.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1e9fbaf920f0f9b4ee62aab832be3ae3968f33f24e2e3f7fbb8c6728ef1d9746"}, - {file = "pyproj-3.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d227a865356f225591b6732430b1d1781e946893789a609bb34f59d09b8b0f8"}, - {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83039e5ae04e5afc974f7d25ee0870a80a6bd6b7957c3aca5613ccbe0d3e72bf"}, - {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb059ba3bced6f6725961ba758649261d85ed6ce670d3e3b0a26e81cf1aa8d"}, - {file = "pyproj-3.6.1-cp312-cp312-win32.whl", hash = "sha256:2d6ff73cc6dbbce3766b6c0bce70ce070193105d8de17aa2470009463682a8eb"}, - {file = "pyproj-3.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:7a27151ddad8e1439ba70c9b4b2b617b290c39395fa9ddb7411ebb0eb86d6fb0"}, - {file = "pyproj-3.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ba1f9b03d04d8cab24d6375609070580a26ce76eaed54631f03bab00a9c737b"}, - {file = "pyproj-3.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18faa54a3ca475bfe6255156f2f2874e9a1c8917b0004eee9f664b86ccc513d3"}, - {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd43bd9a9b9239805f406fd82ba6b106bf4838d9ef37c167d3ed70383943ade1"}, - {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50100b2726a3ca946906cbaa789dd0749f213abf0cbb877e6de72ca7aa50e1ae"}, - {file = "pyproj-3.6.1-cp39-cp39-win32.whl", hash = "sha256:9274880263256f6292ff644ca92c46d96aa7e57a75c6df3f11d636ce845a1877"}, - {file = "pyproj-3.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:36b64c2cb6ea1cc091f329c5bd34f9c01bb5da8c8e4492c709bda6a09f96808f"}, - {file = 
"pyproj-3.6.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd93c1a0c6c4aedc77c0fe275a9f2aba4d59b8acf88cebfc19fe3c430cfabf4f"}, - {file = "pyproj-3.6.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6420ea8e7d2a88cb148b124429fba8cd2e0fae700a2d96eab7083c0928a85110"}, - {file = "pyproj-3.6.1.tar.gz", hash = "sha256:44aa7c704c2b7d8fb3d483bbf75af6cb2350d30a63b144279a09b75fead501bf"}, -] - -[package.dependencies] -certifi = "*" -name = "protobuf" -version = "4.25.3" -description = "" -optional = false -python-versions = ">=3.8" -files = [ - {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, - {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, - {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, - {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, - {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, - {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, - {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, - {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, - {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, - {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, - {file = "protobuf-4.25.3.tar.gz", 
hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, -] - -[[package]] -name = "pyarrow" -version = "15.0.2" -description = "Python library for Apache Arrow" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pyarrow-15.0.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:88b340f0a1d05b5ccc3d2d986279045655b1fe8e41aba6ca44ea28da0d1455d8"}, - {file = "pyarrow-15.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eaa8f96cecf32da508e6c7f69bb8401f03745c050c1dd42ec2596f2e98deecac"}, - {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23c6753ed4f6adb8461e7c383e418391b8d8453c5d67e17f416c3a5d5709afbd"}, - {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f639c059035011db8c0497e541a8a45d98a58dbe34dc8fadd0ef128f2cee46e5"}, - {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:290e36a59a0993e9a5224ed2fb3e53375770f07379a0ea03ee2fce2e6d30b423"}, - {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:06c2bb2a98bc792f040bef31ad3e9be6a63d0cb39189227c08a7d955db96816e"}, - {file = "pyarrow-15.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:f7a197f3670606a960ddc12adbe8075cea5f707ad7bf0dffa09637fdbb89f76c"}, - {file = "pyarrow-15.0.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5f8bc839ea36b1f99984c78e06e7a06054693dc2af8920f6fb416b5bca9944e4"}, - {file = "pyarrow-15.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f5e81dfb4e519baa6b4c80410421528c214427e77ca0ea9461eb4097c328fa33"}, - {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a4f240852b302a7af4646c8bfe9950c4691a419847001178662a98915fd7ee7"}, - {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e7d9cfb5a1e648e172428c7a42b744610956f3b70f524aa3a6c02a448ba853e"}, - {file = 
"pyarrow-15.0.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2d4f905209de70c0eb5b2de6763104d5a9a37430f137678edfb9a675bac9cd98"}, - {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90adb99e8ce5f36fbecbbc422e7dcbcbed07d985eed6062e459e23f9e71fd197"}, - {file = "pyarrow-15.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:b116e7fd7889294cbd24eb90cd9bdd3850be3738d61297855a71ac3b8124ee38"}, - {file = "pyarrow-15.0.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:25335e6f1f07fdaa026a61c758ee7d19ce824a866b27bba744348fa73bb5a440"}, - {file = "pyarrow-15.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:90f19e976d9c3d8e73c80be84ddbe2f830b6304e4c576349d9360e335cd627fc"}, - {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a22366249bf5fd40ddacc4f03cd3160f2d7c247692945afb1899bab8a140ddfb"}, - {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2a335198f886b07e4b5ea16d08ee06557e07db54a8400cc0d03c7f6a22f785f"}, - {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e6d459c0c22f0b9c810a3917a1de3ee704b021a5fb8b3bacf968eece6df098f"}, - {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:033b7cad32198754d93465dcfb71d0ba7cb7cd5c9afd7052cab7214676eec38b"}, - {file = "pyarrow-15.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:29850d050379d6e8b5a693098f4de7fd6a2bea4365bfd073d7c57c57b95041ee"}, - {file = "pyarrow-15.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:7167107d7fb6dcadb375b4b691b7e316f4368f39f6f45405a05535d7ad5e5058"}, - {file = "pyarrow-15.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e85241b44cc3d365ef950432a1b3bd44ac54626f37b2e3a0cc89c20e45dfd8bf"}, - {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:248723e4ed3255fcd73edcecc209744d58a9ca852e4cf3d2577811b6d4b59818"}, - {file = 
"pyarrow-15.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ff3bdfe6f1b81ca5b73b70a8d482d37a766433823e0c21e22d1d7dde76ca33f"}, - {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:f3d77463dee7e9f284ef42d341689b459a63ff2e75cee2b9302058d0d98fe142"}, - {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:8c1faf2482fb89766e79745670cbca04e7018497d85be9242d5350cba21357e1"}, - {file = "pyarrow-15.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:28f3016958a8e45a1069303a4a4f6a7d4910643fc08adb1e2e4a7ff056272ad3"}, - {file = "pyarrow-15.0.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:89722cb64286ab3d4daf168386f6968c126057b8c7ec3ef96302e81d8cdb8ae4"}, - {file = "pyarrow-15.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cd0ba387705044b3ac77b1b317165c0498299b08261d8122c96051024f953cd5"}, - {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad2459bf1f22b6a5cdcc27ebfd99307d5526b62d217b984b9f5c974651398832"}, - {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58922e4bfece8b02abf7159f1f53a8f4d9f8e08f2d988109126c17c3bb261f22"}, - {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:adccc81d3dc0478ea0b498807b39a8d41628fa9210729b2f718b78cb997c7c91"}, - {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8bd2baa5fe531571847983f36a30ddbf65261ef23e496862ece83bdceb70420d"}, - {file = "pyarrow-15.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6669799a1d4ca9da9c7e06ef48368320f5856f36f9a4dd31a11839dda3f6cc8c"}, - {file = "pyarrow-15.0.2.tar.gz", hash = "sha256:9c9bc803cb3b7bfacc1e96ffbfd923601065d9d3f911179d81e72d99fd74a3d9"}, -] - -[package.dependencies] -numpy = ">=1.16.6,<2" - -[[package]] -name = "pydeck" -version = "0.8.0" -description = "Widget for deck.gl maps" -optional = false -python-versions = ">=3.7" -files = [ - {file = 
"pydeck-0.8.0-py2.py3-none-any.whl", hash = "sha256:a8fa7757c6f24bba033af39db3147cb020eef44012ba7e60d954de187f9ed4d5"}, - {file = "pydeck-0.8.0.tar.gz", hash = "sha256:07edde833f7cfcef6749124351195aa7dcd24663d4909fd7898dbd0b6fbc01ec"}, -] - -[package.dependencies] -jinja2 = ">=2.10.1" -numpy = ">=1.16.4" - -[package.extras] -carto = ["pydeck-carto"] -jupyter = ["ipykernel (>=5.1.2)", "ipython (>=5.8.0)", "ipywidgets (>=7,<8)", "traitlets (>=4.3.2)"] - -[[package]] -name = "pygments" -version = "2.17.2" -description = "Pygments is a syntax highlighting package written in Python." -optional = false -python-versions = ">=3.7" -files = [ - {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, - {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, -] - -[package.extras] -plugins = ["importlib-metadata"] -windows-terminal = ["colorama (>=0.4.6)"] - [[package]] name = "pyproject-api" version = "1.6.1" description = "API to interact with the python pyproject.toml based projects" +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1323,6 +790,7 @@ testing = ["covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytes name = "pytest" version = "7.4.4" description = "pytest: simple powerful testing with Python" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1345,6 +813,7 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" +category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -1359,6 +828,7 @@ six = ">=1.5" name = "pytz" version = "2024.1" description = "World timezone definitions, modern and historical" +category = "main" optional = false python-versions = "*" files = [ @@ -1370,6 +840,7 @@ 
files = [ name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1426,25 +897,11 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] -[[package]] -name = "referencing" -version = "0.34.0" -description = "JSON Referencing + Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "referencing-0.34.0-py3-none-any.whl", hash = "sha256:d53ae300ceddd3169f1ffa9caf2cb7b769e92657e4fafb23d34b93679116dfd4"}, - {file = "referencing-0.34.0.tar.gz", hash = "sha256:5773bd84ef41799a5a8ca72dc34590c041eb01bf9aa02632b4a973fb0181a844"}, -] - -[package.dependencies] -attrs = ">=22.2.0" -rpds-py = ">=0.7.0" - [[package]] name = "requests" version = "2.31.0" description = "Python HTTP for Humans." +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1466,6 +923,7 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "retrying" version = "1.3.4" description = "Retrying" +category = "main" optional = false python-versions = "*" files = [ @@ -1476,178 +934,11 @@ files = [ [package.dependencies] six = ">=1.7.0" -[[package]] -name = "rich" -version = "13.7.1" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, - {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, -] - -[package.dependencies] -markdown-it-py = ">=2.2.0" -pygments = ">=2.13.0,<3.0.0" - -[package.extras] -jupyter = ["ipywidgets (>=7.5.1,<9)"] - -[[package]] -name = "rpds-py" -version = "0.18.0" -description = "Python bindings to Rust's persistent data structures (rpds)" -optional = false -python-versions = 
">=3.8" -files = [ - {file = "rpds_py-0.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e"}, - {file = "rpds_py-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434"}, - {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3"}, - {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e"}, - {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88"}, - {file = "rpds_py-0.18.0-cp310-none-win32.whl", hash = "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337"}, - {file = "rpds_py-0.18.0-cp310-none-win_amd64.whl", hash = 
"sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66"}, - {file = "rpds_py-0.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4"}, - {file = "rpds_py-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6"}, - {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688"}, - {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b"}, - {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836"}, - {file = "rpds_py-0.18.0-cp311-none-win32.whl", hash = "sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1"}, - {file = 
"rpds_py-0.18.0-cp311-none-win_amd64.whl", hash = "sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa"}, - {file = "rpds_py-0.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0"}, - {file = "rpds_py-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157"}, - {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496"}, - {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f"}, - {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7"}, - {file = "rpds_py-0.18.0-cp312-none-win32.whl", hash = 
"sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98"}, - {file = "rpds_py-0.18.0-cp312-none-win_amd64.whl", hash = "sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec"}, - {file = "rpds_py-0.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e"}, - {file = "rpds_py-0.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c"}, - {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f"}, - {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c"}, - {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594"}, - {file = "rpds_py-0.18.0-cp38-none-win32.whl", hash = 
"sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e"}, - {file = "rpds_py-0.18.0-cp38-none-win_amd64.whl", hash = "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1"}, - {file = "rpds_py-0.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33"}, - {file = "rpds_py-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f"}, - {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e"}, - {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024"}, - {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20"}, - {file = "rpds_py-0.18.0-cp39-none-win32.whl", hash = 
"sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7"}, - {file = "rpds_py-0.18.0-cp39-none-win_amd64.whl", hash = "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f"}, - {file = "rpds_py-0.18.0.tar.gz", hash = "sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d"}, -] - -[[package]] -name = "scipy" -version = "1.13.0" -description = "Fundamental algorithms for scientific computing in Python" -optional = false 
-python-versions = ">=3.9" -files = [ - {file = "scipy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba419578ab343a4e0a77c0ef82f088238a93eef141b2b8017e46149776dfad4d"}, - {file = "scipy-1.13.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:22789b56a999265431c417d462e5b7f2b487e831ca7bef5edeb56efe4c93f86e"}, - {file = "scipy-1.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f1432ba070e90d42d7fd836462c50bf98bd08bed0aa616c359eed8a04e3922"}, - {file = "scipy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8434f6f3fa49f631fae84afee424e2483289dfc30a47755b4b4e6b07b2633a4"}, - {file = "scipy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:dcbb9ea49b0167de4167c40eeee6e167caeef11effb0670b554d10b1e693a8b9"}, - {file = "scipy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:1d2f7bb14c178f8b13ebae93f67e42b0a6b0fc50eba1cd8021c9b6e08e8fb1cd"}, - {file = "scipy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fbcf8abaf5aa2dc8d6400566c1a727aed338b5fe880cde64907596a89d576fa"}, - {file = "scipy-1.13.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5e4a756355522eb60fcd61f8372ac2549073c8788f6114449b37e9e8104f15a5"}, - {file = "scipy-1.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5acd8e1dbd8dbe38d0004b1497019b2dbbc3d70691e65d69615f8a7292865d7"}, - {file = "scipy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ff7dad5d24a8045d836671e082a490848e8639cabb3dbdacb29f943a678683d"}, - {file = "scipy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4dca18c3ffee287ddd3bc8f1dabaf45f5305c5afc9f8ab9cbfab855e70b2df5c"}, - {file = "scipy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:a2f471de4d01200718b2b8927f7d76b5d9bde18047ea0fa8bd15c5ba3f26a1d6"}, - {file = "scipy-1.13.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0de696f589681c2802f9090fff730c218f7c51ff49bf252b6a97ec4a5d19e8b"}, - 
{file = "scipy-1.13.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:b2a3ff461ec4756b7e8e42e1c681077349a038f0686132d623fa404c0bee2551"}, - {file = "scipy-1.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf9fe63e7a4bf01d3645b13ff2aa6dea023d38993f42aaac81a18b1bda7a82a"}, - {file = "scipy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e7626dfd91cdea5714f343ce1176b6c4745155d234f1033584154f60ef1ff42"}, - {file = "scipy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:109d391d720fcebf2fbe008621952b08e52907cf4c8c7efc7376822151820820"}, - {file = "scipy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:8930ae3ea371d6b91c203b1032b9600d69c568e537b7988a3073dfe4d4774f21"}, - {file = "scipy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5407708195cb38d70fd2d6bb04b1b9dd5c92297d86e9f9daae1576bd9e06f602"}, - {file = "scipy-1.13.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:ac38c4c92951ac0f729c4c48c9e13eb3675d9986cc0c83943784d7390d540c78"}, - {file = "scipy-1.13.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c74543c4fbeb67af6ce457f6a6a28e5d3739a87f62412e4a16e46f164f0ae5"}, - {file = "scipy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28e286bf9ac422d6beb559bc61312c348ca9b0f0dae0d7c5afde7f722d6ea13d"}, - {file = "scipy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33fde20efc380bd23a78a4d26d59fc8704e9b5fd9b08841693eb46716ba13d86"}, - {file = "scipy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:45c08bec71d3546d606989ba6e7daa6f0992918171e2a6f7fbedfa7361c2de1e"}, - {file = "scipy-1.13.0.tar.gz", hash = "sha256:58569af537ea29d3f78e5abd18398459f195546bb3be23d16677fb26616cc11e"}, -] - -[package.dependencies] -numpy = ">=1.22.4,<2.3" - -[package.extras] -dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] -doc = 
["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] -test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] - [[package]] name = "setuptools" version = "69.0.3" description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1660,68 +951,11 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] -[[package]] -name = "shapely" -version = "2.0.3" -description = "Manipulation and analysis of geometric objects" -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:af7e9abe180b189431b0f490638281b43b84a33a960620e6b2e8d3e3458b61a1"}, - {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98040462b36ced9671e266b95c326b97f41290d9d17504a1ee4dc313a7667b9c"}, - {file = "shapely-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:71eb736ef2843f23473c6e37f6180f90f0a35d740ab284321548edf4e55d9a52"}, - {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:881eb9dbbb4a6419667e91fcb20313bfc1e67f53dbb392c6840ff04793571ed1"}, - {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f10d2ccf0554fc0e39fad5886c839e47e207f99fdf09547bc687a2330efda35b"}, - {file = "shapely-2.0.3-cp310-cp310-win32.whl", hash = "sha256:6dfdc077a6fcaf74d3eab23a1ace5abc50c8bce56ac7747d25eab582c5a2990e"}, - {file = "shapely-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:64c5013dacd2d81b3bb12672098a0b2795c1bf8190cfc2980e380f5ef9d9e4d9"}, - {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56cee3e4e8159d6f2ce32e421445b8e23154fd02a0ac271d6a6c0b266a8e3cce"}, - {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:619232c8276fded09527d2a9fd91a7885ff95c0ff9ecd5e3cb1e34fbb676e2ae"}, - {file = "shapely-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2a7d256db6f5b4b407dc0c98dd1b2fcf1c9c5814af9416e5498d0a2e4307a4b"}, - {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45f0c8cd4583647db3216d965d49363e6548c300c23fd7e57ce17a03f824034"}, - {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13cb37d3826972a82748a450328fe02a931dcaed10e69a4d83cc20ba021bc85f"}, - {file = "shapely-2.0.3-cp311-cp311-win32.whl", hash = "sha256:9302d7011e3e376d25acd30d2d9e70d315d93f03cc748784af19b00988fc30b1"}, - {file = "shapely-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6b464f2666b13902835f201f50e835f2f153f37741db88f68c7f3b932d3505fa"}, - {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e86e7cb8e331a4850e0c2a8b2d66dc08d7a7b301b8d1d34a13060e3a5b4b3b55"}, - {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:c91981c99ade980fc49e41a544629751a0ccd769f39794ae913e53b07b2f78b9"}, - {file = "shapely-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd45d456983dc60a42c4db437496d3f08a4201fbf662b69779f535eb969660af"}, - {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:882fb1ffc7577e88c1194f4f1757e277dc484ba096a3b94844319873d14b0f2d"}, - {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9f2d93bff2ea52fa93245798cddb479766a18510ea9b93a4fb9755c79474889"}, - {file = "shapely-2.0.3-cp312-cp312-win32.whl", hash = "sha256:99abad1fd1303b35d991703432c9481e3242b7b3a393c186cfb02373bf604004"}, - {file = "shapely-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:6f555fe3304a1f40398977789bc4fe3c28a11173196df9ece1e15c5bc75a48db"}, - {file = "shapely-2.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a983cc418c1fa160b7d797cfef0e0c9f8c6d5871e83eae2c5793fce6a837fad9"}, - {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18bddb8c327f392189a8d5d6b9a858945722d0bb95ccbd6a077b8e8fc4c7890d"}, - {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:442f4dcf1eb58c5a4e3428d88e988ae153f97ab69a9f24e07bf4af8038536325"}, - {file = "shapely-2.0.3-cp37-cp37m-win32.whl", hash = "sha256:31a40b6e3ab00a4fd3a1d44efb2482278642572b8e0451abdc8e0634b787173e"}, - {file = "shapely-2.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:59b16976c2473fec85ce65cc9239bef97d4205ab3acead4e6cdcc72aee535679"}, - {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:705efbce1950a31a55b1daa9c6ae1c34f1296de71ca8427974ec2f27d57554e3"}, - {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:601c5c0058a6192df704cb889439f64994708563f57f99574798721e9777a44b"}, - {file = "shapely-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:f24ecbb90a45c962b3b60d8d9a387272ed50dc010bfe605f1d16dfc94772d8a1"}, - {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c2a2989222c6062f7a0656e16276c01bb308bc7e5d999e54bf4e294ce62e76"}, - {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42bceb9bceb3710a774ce04908fda0f28b291323da2688f928b3f213373b5aee"}, - {file = "shapely-2.0.3-cp38-cp38-win32.whl", hash = "sha256:54d925c9a311e4d109ec25f6a54a8bd92cc03481a34ae1a6a92c1fe6729b7e01"}, - {file = "shapely-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:300d203b480a4589adefff4c4af0b13919cd6d760ba3cbb1e56275210f96f654"}, - {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:083d026e97b6c1f4a9bd2a9171c7692461092ed5375218170d91705550eecfd5"}, - {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:27b6e1910094d93e9627f2664121e0e35613262fc037051680a08270f6058daf"}, - {file = "shapely-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:71b2de56a9e8c0e5920ae5ddb23b923490557ac50cb0b7fa752761bf4851acde"}, - {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d279e56bbb68d218d63f3efc80c819cedcceef0e64efbf058a1df89dc57201b"}, - {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88566d01a30f0453f7d038db46bc83ce125e38e47c5f6bfd4c9c287010e9bf74"}, - {file = "shapely-2.0.3-cp39-cp39-win32.whl", hash = "sha256:58afbba12c42c6ed44c4270bc0e22f3dadff5656d711b0ad335c315e02d04707"}, - {file = "shapely-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:5026b30433a70911979d390009261b8c4021ff87c7c3cbd825e62bb2ffa181bc"}, - {file = "shapely-2.0.3.tar.gz", hash = "sha256:4d65d0aa7910af71efa72fd6447e02a8e5dd44da81a983de9d736d6e6ccbe674"}, -] - -[package.dependencies] -numpy = ">=1.14,<2" - -[package.extras] -docs = ["matplotlib", "numpydoc (>=1.1.0,<1.2.0)", "sphinx", "sphinx-book-theme", 
"sphinx-remove-toctrees"] -test = ["pytest", "pytest-cov"] - [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" +category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -1729,123 +963,11 @@ files = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] -[[package]] -name = "smmap" -version = "5.0.1" -description = "A pure Python implementation of a sliding window memory map manager" -optional = false -python-versions = ">=3.7" -files = [ - {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, - {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, -] - -[[package]] -name = "statsmodels" -version = "0.14.1" -description = "Statistical computations and models for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "statsmodels-0.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43af9c0b07c9d72f275cf14ea54a481a3f20911f0b443181be4769def258fdeb"}, - {file = "statsmodels-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16975ab6ad505d837ba9aee11f92a8c5b49c4fa1ff45b60fe23780b19e5705e"}, - {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e278fe74da5ed5e06c11a30851eda1af08ef5af6be8507c2c45d2e08f7550dde"}, - {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0564d92cb05b219b4538ed09e77d96658a924a691255e1f7dd23ee338df441b"}, - {file = "statsmodels-0.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5385e22e72159a09c099c4fb975f350a9f3afeb57c1efce273b89dcf1fe44c0f"}, - {file = "statsmodels-0.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:0a8aae75a2e08ebd990e5fa394f8e32738b55785cb70798449a3f4207085e667"}, - {file = 
"statsmodels-0.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b69a63ad6c979a6e4cde11870ffa727c76a318c225a7e509f031fbbdfb4e416a"}, - {file = "statsmodels-0.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7562cb18a90a114f39fab6f1c25b9c7b39d9cd5f433d0044b430ca9d44a8b52c"}, - {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3abaca4b963259a2bf349c7609cfbb0ce64ad5fb3d92d6f08e21453e4890248"}, - {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f727fe697f6406d5f677b67211abe5a55101896abdfacdb3f38410405f6ad8"}, - {file = "statsmodels-0.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6838ac6bdb286daabb5e91af90fd4258f09d0cec9aace78cc441cb2b17df428"}, - {file = "statsmodels-0.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:709bfcef2dbe66f705b17e56d1021abad02243ee1a5d1efdb90f9bad8b06a329"}, - {file = "statsmodels-0.14.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f32a7cd424cf33304a54daee39d32cccf1d0265e652c920adeaeedff6d576457"}, - {file = "statsmodels-0.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f8c30181c084173d662aaf0531867667be2ff1bee103b84feb64f149f792dbd2"}, - {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de2b97413913d52ad6342dece2d653e77f78620013b7705fad291d4e4266ccb"}, - {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3420f88289c593ba2bca33619023059c476674c160733bd7d858564787c83d3"}, - {file = "statsmodels-0.14.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c008e16096f24f0514e53907890ccac6589a16ad6c81c218f2ee6752fdada555"}, - {file = "statsmodels-0.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:bc0351d279c4e080f0ce638a3d886d312aa29eade96042e3ba0a73771b1abdfb"}, - {file = "statsmodels-0.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:bf293ada63b2859d95210165ad1dfcd97bd7b994a5266d6fbeb23659d8f0bf68"}, - {file = "statsmodels-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44ca8cb88fa3d3a4ffaff1fb8eb0e98bbf83fc936fcd9b9eedee258ecc76696a"}, - {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d5373d176239993c095b00d06036690a50309a4e00c2da553b65b840f956ae6"}, - {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532dfe899f8b6632cd8caa0b089b403415618f51e840d1817a1e4b97e200c73"}, - {file = "statsmodels-0.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:4fe0a60695952b82139ae8750952786a700292f9e0551d572d7685070944487b"}, - {file = "statsmodels-0.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04293890f153ffe577e60a227bd43babd5f6c1fc50ea56a3ab1862ae85247a95"}, - {file = "statsmodels-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e70a2e93d54d40b2cb6426072acbc04f35501b1ea2569f6786964adde6ca572"}, - {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab3a73d16c0569adbba181ebb967e5baaa74935f6d2efe86ac6fc5857449b07d"}, - {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eefa5bcff335440ee93e28745eab63559a20cd34eea0375c66d96b016de909b3"}, - {file = "statsmodels-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:bc43765710099ca6a942b5ffa1bac7668965052542ba793dd072d26c83453572"}, - {file = "statsmodels-0.14.1.tar.gz", hash = "sha256:2260efdc1ef89f39c670a0bd8151b1d0843567781bcafec6cda0534eb47a94f6"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.22.3,<2", markers = "python_version == \"3.10\" and platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""}, - {version = ">=1.18,<2", markers = "python_version != \"3.10\" or platform_system != \"Windows\" or platform_python_implementation == \"PyPy\""}, -] -packaging = ">=21.3" -pandas = ">=1.0,<2.1.0 || 
>2.1.0" -patsy = ">=0.5.4" -scipy = ">=1.4,<1.9.2 || >1.9.2" - -[package.extras] -build = ["cython (>=0.29.33)"] -develop = ["colorama", "cython (>=0.29.33)", "cython (>=0.29.33,<4.0.0)", "flake8", "isort", "joblib", "matplotlib (>=3)", "oldest-supported-numpy (>=2022.4.18)", "pytest (>=7.3.0)", "pytest-cov", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=8.0,<9.0)"] -docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"] - -[[package]] -name = "streamlit" -version = "1.32.2" -description = "A faster way to build and share data apps" -optional = false -python-versions = ">=3.8, !=3.9.7" -files = [ - {file = "streamlit-1.32.2-py2.py3-none-any.whl", hash = "sha256:a0b8044e76fec364b07be145f8b40dbd8d083e20ebbb189ceb1fa9423f3dedea"}, - {file = "streamlit-1.32.2.tar.gz", hash = "sha256:1258b9cbc3ff957bf7d09b1bfc85cedc308f1065b30748545295a9af8d5577ab"}, -] - -[package.dependencies] -altair = ">=4.0,<6" -blinker = ">=1.0.0,<2" -cachetools = ">=4.0,<6" -click = ">=7.0,<9" -gitpython = ">=3.0.7,<3.1.19 || >3.1.19,<4" -numpy = ">=1.19.3,<2" -packaging = ">=16.8,<24" -pandas = ">=1.3.0,<3" -pillow = ">=7.1.0,<11" -protobuf = ">=3.20,<5" -pyarrow = ">=7.0" -pydeck = ">=0.8.0b4,<1" -requests = ">=2.27,<3" -rich = ">=10.14.0,<14" -tenacity = ">=8.1.0,<9" -toml = ">=0.10.1,<2" -tornado = ">=6.0.3,<7" -typing-extensions = ">=4.3.0,<5" -watchdog = {version = ">=2.1.5", markers = "platform_system != \"Darwin\""} - -[package.extras] -snowflake = ["snowflake-connector-python (>=2.8.0)", "snowflake-snowpark-python (>=0.9.0)"] - -[[package]] -name = "streamlit-dynamic-filters" -version = "0.1.6" -description = "Dynamic multiselect filters for Streamlit" -optional = false -python-versions = "*" -files = [ - {file = "streamlit_dynamic_filters-0.1.6-py3-none-any.whl", hash = "sha256:882f213dd3b846704a894c8e31271f0401775334f979a9e4e492a85035179d56"}, - {file = "streamlit_dynamic_filters-0.1.6.tar.gz", 
hash = "sha256:3d4f53007bf281c846477a2d9f202e61bb97c19c5c43d3dadab75019133c28f2"}, -] - -[package.dependencies] -streamlit = "*" - [[package]] name = "tenacity" version = "8.2.3" description = "Retry code until it succeeds" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1856,21 +978,11 @@ files = [ [package.extras] doc = ["reno", "sphinx", "tornado (>=4.5)"] -[[package]] -name = "toml" -version = "0.10.2" -description = "Python Library for Tom's Obvious, Minimal Language" -optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, - {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, -] - [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1878,41 +990,11 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] -[[package]] -name = "toolz" -version = "0.12.1" -description = "List processing tools and functional utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "toolz-0.12.1-py3-none-any.whl", hash = "sha256:d22731364c07d72eea0a0ad45bafb2c2937ab6fd38a3507bf55eae8744aa7d85"}, - {file = "toolz-0.12.1.tar.gz", hash = "sha256:ecca342664893f177a13dac0e6b41cbd8ac25a358e5f215316d43e2100224f4d"}, -] - -[[package]] -name = "tornado" -version = "6.4" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-optional = false -python-versions = ">= 3.8" -files = [ - {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"}, - {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"}, - {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"}, - {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"}, - {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"}, -] - [[package]] name = "tox" version = "4.12.1" description = "tox is a generic virtualenv management and test command line tool" +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1940,6 +1022,7 @@ testing = ["build[virtualenv] (>=1.0.3)", "covdefaults 
(>=2.3)", "detect-test-po name = "typing-extensions" version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1951,6 +1034,7 @@ files = [ name = "tzdata" version = "2024.1" description = "Provider of IANA time zone data" +category = "main" optional = false python-versions = ">=2" files = [ @@ -1962,6 +1046,7 @@ files = [ name = "urllib3" version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1979,6 +1064,7 @@ zstd = ["zstandard (>=0.18.0)"] name = "virtualenv" version = "20.25.0" description = "Virtual Python Environment builder" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1995,51 +1081,11 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] -[[package]] -name = "watchdog" -version = "4.0.0" -description = "Filesystem events monitoring" -optional = false -python-versions = ">=3.8" -files = [ - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, - {file = 
"watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, - {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, - {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, - {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, - {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, - {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, - {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, - {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, -] - -[package.extras] -watchmedo = ["PyYAML (>=3.10)"] - [[package]] name = "werkzeug" version = "3.0.1" description = "The comprehensive WSGI web application library." 
+category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2053,29 +1099,11 @@ MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] -[[package]] -name = "xyzservices" -version = "2024.4.0" -description = "Source of XYZ tiles providers" -category = "main" -optional = false -python-versions = ">=3.8" -files = [ - {file = "xyzservices-2024.4.0-py3-none-any.whl", hash = "sha256:b83e48c5b776c9969fffcfff57b03d02b1b1cd6607a9d9c4e7f568b01ef47f4c"}, - {file = "xyzservices-2024.4.0.tar.gz", hash = "sha256:6a04f11487a6fb77d92a98984cd107fbd9157fd5e65f929add9c3d6e604ee88c"}, -version = "2023.10.1" -description = "Source of XYZ tiles providers" -optional = false -python-versions = ">=3.8" -files = [ - {file = "xyzservices-2023.10.1-py3-none-any.whl", hash = "sha256:6a4c38d3a9f89d3e77153eff9414b36a8ee0850c9e8b85796fd1b2a85b8dfd68"}, - {file = "xyzservices-2023.10.1.tar.gz", hash = "sha256:091229269043bc8258042edbedad4fcb44684b0473ede027b5672ad40dc9fa02"}, -] - [[package]] name = "zipp" version = "3.18.1" description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2090,4 +1118,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "d12b7177519b2078c58a3688cb984d42a5e65a8d6ff60a9ad83343b43641a566" \ No newline at end of file +content-hash = "a604d3b769ffc5079bf789d1557a112f77a5fbf91071732ab27de41caf356da8" diff --git a/pyproject.toml b/pyproject.toml index f5d7dad..3476549 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,9 +16,9 @@ python = "^3.10" # pandas = "^1.1.1" # jupyter = "^1.0.0" # ipykernel = "^5.3.4" -pandas = "^2.2.1" +pandas = "2.0.3" dash = "^2.16.1" -duckdb = "^0.10.1" +duckdb = "0.10.0" geopandas = "^0.14.3" folium = "^0.16.0" streamlit = "^1.32.2" From 301fc1a5e9e795ad311a7e01f1060e62b258560a Mon Sep 17 00:00:00 2001 From: 
Mendi33 Date: Thu, 20 Jun 2024 18:55:06 +0000 Subject: [PATCH 141/147] upload hotspot.py from data --- dashboards/app/pages/hotspots.py | 100 +++++++++++++++++++------------ 1 file changed, 61 insertions(+), 39 deletions(-) diff --git a/dashboards/app/pages/hotspots.py b/dashboards/app/pages/hotspots.py index 6c4ae3f..95eac35 100644 --- a/dashboards/app/pages/hotspots.py +++ b/dashboards/app/pages/hotspots.py @@ -432,25 +432,30 @@ def calculate_and_display_metrics(data, indicator_col1, indicator_col2, indicato def couleur_milieu(type): return couleur.get(type, "white") # Returns 'white' if the type is not found + def update_lieu_options(selected_milieu): if selected_milieu and selected_milieu != "Sélectionnez un milieu...": - filtered_data = data_zds[data_zds['TYPE_MILIEU'] == selected_milieu] - return ["Sélectionnez un lieu..."] + list(filtered_data['TYPE_LIEU2'].dropna().unique()) + filtered_data = data_zds[data_zds["TYPE_MILIEU"] == selected_milieu] + return ["Sélectionnez un lieu..."] + list( + filtered_data["TYPE_LIEU2"].dropna().unique() + ) return ["Sélectionnez un lieu..."] + @st.cache_data def process_data(data_zds): # Filtering data to ensure surface area is not zero - data_zds = data_zds[data_zds['SURFACE'] > 0] + data_zds = data_zds[data_zds["SURFACE"] > 0] # Calculating density and filtering out anomalous values - data_zds['DENSITE'] = data_zds['VOLUME_TOTAL'] / data_zds['SURFACE'] - data_zds = data_zds[data_zds['DENSITE'] < 20] + data_zds["DENSITE"] = data_zds["VOLUME_TOTAL"] / data_zds["SURFACE"] + data_zds = data_zds[data_zds["DENSITE"] < 20] # Rounding values for better display - data_zds['DENSITE'] = data_zds['DENSITE'].round(4) - data_zds['SURFACE_ROND'] = data_zds['SURFACE'].round(2) + data_zds["DENSITE"] = data_zds["DENSITE"].round(4) + data_zds["SURFACE_ROND"] = data_zds["SURFACE"].round(2) return data_zds -#Zoom from admin level + +# Zoom from admin level if NIVEAU_ADMIN == "Commune": zoom_admin = 12 elif NIVEAU_ADMIN == "EPCI": @@ -469,14 
+474,16 @@ def plot_density_map(data_zds: pd.DataFrame, filtered_data: pd.DataFrame) -> fol else: # Use processed data - processed_data = process_data(filtered_data if not filtered_data.empty else data_zds) + processed_data = process_data( + filtered_data if not filtered_data.empty else data_zds + ) m = folium.Map( location=[ - processed_data['LIEU_COORD_GPS_Y'].mean(), - processed_data['LIEU_COORD_GPS_X'].mean() + processed_data["LIEU_COORD_GPS_Y"].mean(), + processed_data["LIEU_COORD_GPS_X"].mean(), ], - zoom_start=zoom_admin + zoom_start=zoom_admin, ) # Loop over each row in the DataFrame to place markers @@ -491,26 +498,25 @@ def plot_density_map(data_zds: pd.DataFrame, filtered_data: pd.DataFrame) -> fol """ lgd_txt = '{txt}' - color = couleur_milieu(row['TYPE_MILIEU']) + color = couleur_milieu(row["TYPE_MILIEU"]) folium.CircleMarker( - fg = folium.FeatureGroup(name= lgd_txt.format( txt= ['TYPE_MILIEU'], col= color)), - location=[row['LIEU_COORD_GPS_Y'], row['LIEU_COORD_GPS_X']], - radius=np.log(row['DENSITE'] + 1)*15, + fg=folium.FeatureGroup( + name=lgd_txt.format(txt=["TYPE_MILIEU"], col=color) + ), + location=[row["LIEU_COORD_GPS_Y"], row["LIEU_COORD_GPS_X"]], + radius=np.log(row["DENSITE"] + 1) * 15, popup=folium.Popup(popup_html, max_width=300), color=color, fill=True, - ).add_to(m) folium_static(m) return m + # Function for 'milieu' density table -def density_table_milieu( - data_zds: pd.DataFrame, - filtered_data: pd.DataFrame -): +def density_table_milieu(data_zds: pd.DataFrame, filtered_data: pd.DataFrame): if data_zds.empty: st.write("Aucune donnée disponible pour la région sélectionnée.") @@ -554,10 +560,7 @@ def density_table_milieu( ) -def density_table_lieu( - data_zds: pd.DataFrame, - filtered_data: pd.DataFrame -): +def density_table_lieu(data_zds: pd.DataFrame, filtered_data: pd.DataFrame): if data_zds.empty: st.write("Aucune donnée disponible pour la région sélectionnée.") @@ -766,55 +769,74 @@ def create_contributors_table(data_zds: 
pd.DataFrame, multi_filter_dict: dict) - # Add a default "Select a milieu..." option selected_milieu = st.selectbox( "Sélectionnez un milieu:", - ["Sélectionnez un milieu..."] + list(pd.unique(data_zds_correct['TYPE_MILIEU'])) + ["Sélectionnez un milieu..."] + + list(pd.unique(data_zds_correct["TYPE_MILIEU"])), ) with right_column: # Update lieu options based on selected milieu lieu_options = update_lieu_options(selected_milieu) selected_lieu = st.selectbox("Sélectionnez un lieu:", lieu_options) - # Place the map centrally by using a wider column for the map and narrower ones on the sides col1, map_col, col3 = st.columns([4, 10, 1]) # Adjust column ratios as needed with map_col: st.markdown("### Carte des Densités") - if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": - filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + if ( + selected_milieu != "Sélectionnez un milieu..." + and selected_lieu != "Sélectionnez un lieu..." + ): + filtered_data = data_zds_correct[ + (data_zds_correct["TYPE_MILIEU"] == selected_milieu) + & (data_zds_correct["TYPE_LIEU2"] == selected_lieu) + ] plot_density_map(data_zds_correct, filtered_data) else: - plot_density_map(data_zds_correct, data_zds_correct) # Show all data by default - + plot_density_map( + data_zds_correct, data_zds_correct + ) # Show all data by default col1, col2, col3 = st.columns([3, 3, 2]) with col1: st.markdown("#### Tableau des Densités par Milieu") - if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": - filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + if ( + selected_milieu != "Sélectionnez un milieu..." + and selected_lieu != "Sélectionnez un lieu..." 
+ ): + filtered_data = data_zds_correct[ + (data_zds_correct["TYPE_MILIEU"] == selected_milieu) + & (data_zds_correct["TYPE_LIEU2"] == selected_lieu) + ] density_table_milieu(data_zds_correct, filtered_data) else: density_table_milieu(data_zds_correct, data_zds_correct) with col2: st.markdown("#### Tableau des Densités par Lieu") - if selected_milieu != "Sélectionnez un milieu..." and selected_lieu != "Sélectionnez un lieu...": - filtered_data = data_zds_correct[(data_zds_correct['TYPE_MILIEU'] == selected_milieu) & (data_zds_correct['TYPE_LIEU2'] == selected_lieu)] + if ( + selected_milieu != "Sélectionnez un milieu..." + and selected_lieu != "Sélectionnez un lieu..." + ): + filtered_data = data_zds_correct[ + (data_zds_correct["TYPE_MILIEU"] == selected_milieu) + & (data_zds_correct["TYPE_LIEU2"] == selected_lieu) + ] density_table_lieu(data_zds_correct, filtered_data) else: density_table_lieu(data_zds_correct, data_zds_correct) with col3: - with st.expander("###### Notice ℹ️", expanded=True): - st.write( - """ + with st.expander("###### Notice ℹ️", expanded=True): + st.write( + """ **Milieu** désigne de grands types d'environnements comme le Littoral, les Cours d'eau ou la Montagne.\n Chaque Milieu est ensuite divisé en **Lieux** plus spécifiques. Par exemple, sous le Milieu Littoral, on trouve des Lieux comme les Plages, les Roches, les Digues, ou les Parkings. 
""" - ) + ) with tab2: # Use the selected filters From e919f144daf5e0557ce9296fd25be4ab326e550d Mon Sep 17 00:00:00 2001 From: Thibaut Gazagnes <122997528+tgazagnes@users.noreply.github.com> Date: Tue, 25 Jun 2024 11:09:46 +0200 Subject: [PATCH 142/147] [tg] corrections suite retours Kyllian --- dashboards/app/pages/data.py | 358 +++++------------- .../pages/ongletdata_colormap_materiaux.json | 1 + .../pages/ongletdata_colormap_secteurs.json | 1 + poetry.lock | 105 ++++- pyproject.toml | 2 + 5 files changed, 212 insertions(+), 255 deletions(-) create mode 100644 dashboards/app/pages/ongletdata_colormap_materiaux.json create mode 100644 dashboards/app/pages/ongletdata_colormap_secteurs.json diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 7d264a6..3779ace 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -7,30 +7,72 @@ import math import locale import duckdb +import json - +########################################################### # Configuration de la page +########################################################### st.set_page_config( layout="wide", page_title="Dashboard Zéro Déchet Sauvage : onglet Data" ) +# Titre de l'onglet +st.markdown( + """# 🔎 Data +Visualisez les impacts sur les milieux naturels et secteurs/filières/marques à l’origine de cette pollution +""" +) + + +# 3 Onglets : Matériaux, Top déchets, Filières et marques +tab1, tab2, tab3 = st.tabs( + [ + "**Matériaux :wood:**", + "**Top Déchets :wastebasket:**", + "**Secteurs économiques, filières et marques :womans_clothes:**", + ] +) + # Définir les paramètres linguistiques FR pour l'affichage des nombres locale.setlocale(locale.LC_NUMERIC, "fr_FR") -# Session state -session_state = st.session_state +# Fonction pour améliorer l'affichage des nombres (milliers, millions, milliards) +def french_format(x: int) -> str: + if x >= 1e9: + y = x / 1e9 + y = locale.format_string("%.2f", y, grouping=True) + return f"{y} milliards" + elif x >= 1e6: + 
y = x / 1e6 + y = locale.format_string("%.2f", y, grouping=True) + return f"{y} millions" + elif x >= 10: + y = locale.format_string("%d", x, grouping=True) + return f"{y}" + else: + y = locale.format_string("%.2f", x, grouping=True) + return f"{y}" + + +########################################################### +# Import des données +########################################################### -# Récupérer les filtres géographiques s'ils ont été fixés +# Importer le session.state pour récupérer les filtres géographiques filtre_niveau = st.session_state.get("niveau_admin", "") filtre_collectivite = st.session_state.get("collectivite", "") -# Titre de l'onglet -st.markdown( - """# 🔎 Data -Visualisez les impacts sur les milieux naturels et secteurs/filières/marques à l’origine de cette pollution -""" -) +## Import des fichiers de config des chartes graphiques +# Couleurs par matériaux charte graphique MERTERRE +with open("pages/ongletdata_colormap_materiaux.json", "r") as jsonfile1: + colors_map = json.load(jsonfile1) + +# Couleurs par secteur (charte Merterre) +with open("pages/ongletdata_colormap_secteurs.json", "r") as jsonfile2: + colors_map_secteur = json.load(jsonfile2) + +# Authentification if st.session_state["authentication_status"]: if filtre_niveau == "" and filtre_collectivite == "": with st.sidebar: @@ -86,35 +128,6 @@ def carac_exclusions(df): } ) - # Copier le df pour la partie filtrée par milieu/lieu/année - # df_other_metrics_raw = df_other.copy() - - # Fonction pour améliorer l'affichage des nombres (milliers, millions, milliards) - def french_format(x: int) -> str: - if x >= 1e9: - y = x / 1e9 - y = locale.format_string("%.2f", y, grouping=True) - return f"{y} milliards" - elif x >= 1e6: - y = x / 1e6 - y = locale.format_string("%.2f", y, grouping=True) - return f"{y} millions" - elif x >= 10: - y = locale.format_string("%d", x, grouping=True) - return f"{y}" - else: - y = locale.format_string("%.2f", x, grouping=True) - return f"{y}" - - # 3 
Onglets : Matériaux, Top déchets, Filières et marques - tab1, tab2, tab3 = st.tabs( - [ - "**Matériaux :wood:**", - "**Top Déchets :wastebasket:**", - "**Secteurs économiques, filières et marques :womans_clothes:**", - ] - ) - milieu_lieu_dict = ( df_other.groupby("TYPE_MILIEU")["TYPE_LIEU"] .unique() @@ -123,8 +136,9 @@ def french_format(x: int) -> str: ) annee_liste = sorted(df_other["ANNEE"].unique().tolist(), reverse=True) - + ########################################################### # Onglet 1 : Matériaux + ########################################################### with tab1: # Transformation du dataframe pour les graphiques @@ -193,7 +207,7 @@ def french_format(x: int) -> str: ].sum() df_totals_sorted = df_totals_sorted.sort_values(["Volume_m3"], ascending=False) - # replace "Verre" with "Verre/Céramique" in df_totals_sorted + # Remplacer "Verre" with "Verre/Céramique" dans df_totals_sorted df_totals_sorted["Matériau"] = df_totals_sorted["Matériau"].replace( "Verre", "Verre/Céramique" ) @@ -201,31 +215,15 @@ def french_format(x: int) -> str: "Papier", "Papier/Carton" ) - # Charte graphique MERTERRE : - colors_map = { - "Textile": "#C384B1", - "Papier": "#CAA674", - "Metal": "#A0A0A0", - "Verre": "#3DCE89", - "Autre": "#F3B900", - "Plastique": "#48BEF0", - "Caoutchouc": "#364E74", - "Bois": "#673C11", - "Papier/Carton": "#CAA674", - "Métal": "#A0A0A0", - "Verre/Céramique": "#3DCE89", - "Autre": "#F3B900", - } - # Message d'avertissement en haut de page si nb de collectes < 5 if nb_collectes_int < 5: st.warning("⚠️ Moins de 5 ramassages dans la base de données") - # Ligne 1 : 2 cellules avec les indicateurs clés en haut de page + ### 3 METRIQUES CLES + # Création des colonnes l1_col1, l1_col2, l1_col3 = st.columns(3) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) - + # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne # 1ère métrique : 
volume total de déchets collectés cell1 = l1_col1.container(border=True) # Trick pour séparer les milliers @@ -241,7 +239,6 @@ def french_format(x: int) -> str: cell3 = l1_col3.container(border=True) cell3.metric("Nombre de ramassages", nb_collectes_int) - # Note méthodo pour expliquer les données retenues pour l'analyse # Périmètre des données volume_total_avant_exclusions_m3 = df_other["VOLUME_TOTAL"].sum() / 1000 volume_exclu = ( @@ -253,6 +250,7 @@ def french_format(x: int) -> str: / 1000 ) + # Encart méthodo pour expliquer les données retenues pour l'analyse with st.expander("Note sur les données utilisées dans cet onglet"): st.markdown( f""" @@ -278,8 +276,7 @@ def french_format(x: int) -> str: ) df_note_methodo.rename("Nombre de relevés", inplace=True) - # Ligne 2 : 2 graphiques en ligne : donut et bar chart matériaux - + ### GRAPHIQUES DONUT ET BAR CHART MATERIAUX with st.container(border=True): cell4, cell5 = st.columns(2) @@ -489,7 +486,7 @@ def french_format(x: int) -> str: "⚠️ Aucune donnée à afficher par type de milieu (nombre de ramassages trop faible)" ) - # Ligne 3 : Graphe par milieu , lieu et année + ### GRAPHIQUE TREEMAP PAR MILIEU, LIEU ET ANNEE st.write("**Détail par année, type de milieu ou de lieu**") # Étape 1: Création des filtres @@ -497,22 +494,21 @@ def french_format(x: int) -> str: with st.expander("Filtrer par année, type milieu ou type de lieu"): # Filtre par Année - # Default values for filters + # Valeurs par défaut valeur_par_defaut_annee = "Toute la période" valeur_par_defaut_milieu = "Tous les milieux" valeur_par_defaut_lieu = "Tous les lieux" - # Filter by year + # Filtre par année selected_annee = st.selectbox( "Choisir une année:", options=[valeur_par_defaut_annee] + annee_liste, ) - # Filter data based on selected year filtered_data = df_other.copy() if selected_annee != valeur_par_defaut_annee: filtered_data = filtered_data[filtered_data["ANNEE"] == selected_annee] - # Filter by milieu + # Filtre par milieu milieux_liste = 
[valeur_par_defaut_milieu] + sorted( filtered_data["TYPE_MILIEU"].unique() ) @@ -521,13 +517,12 @@ def french_format(x: int) -> str: options=milieux_liste, ) - # Filter data based on selected milieu if selected_type_milieu != valeur_par_defaut_milieu: filtered_data = filtered_data[ filtered_data["TYPE_MILIEU"] == selected_type_milieu ] - # Filter by lieu + # Filtre par lieu lieux_liste = [valeur_par_defaut_lieu] + sorted( filtered_data["TYPE_LIEU"].unique() ) @@ -536,20 +531,19 @@ def french_format(x: int) -> str: options=lieux_liste, ) - # Filter data based on selected lieu if selected_type_lieu != valeur_par_defaut_lieu: filtered_data = filtered_data[ filtered_data["TYPE_LIEU"] == selected_type_lieu ] - # Final filtered data + # Dataframe final filtré par année, milieu et lieu df_filtered = filtered_data.copy() # Message d'avertissement nb de collectes en dessous de 5 if len(df_filtered) < 5: st.warning("⚠️ Moins de 5 ramassages dans la base de données") - # Ligne 5 : Metriques filtrés + ### 3 METRIQUES AVEC FILTRES l5_col1, l5_col2, l5_col3 = st.columns(3) cell6 = l5_col1.container(border=True) cell7 = l5_col2.container(border=True) @@ -641,7 +635,9 @@ def french_format(x: int) -> str: else: st.write("Aucune donnée à afficher pour les filtres sélectionnés.") + ########################################################### # Onglet 2 : Top Déchets + ########################################################### with tab2: # Préparation des datas pour l'onglet 2 @@ -652,7 +648,7 @@ def french_format(x: int) -> str: filtered_df = df_other.copy() # Initialiser le df sans filtres - # Define the initial options for the selectboxes + # Definir options initiales sans filtres annee_options = [valeur_par_defaut_annee] + sorted( df_other["ANNEE"].unique().tolist(), reverse=True ) @@ -666,11 +662,11 @@ def french_format(x: int) -> str: key="topdechets_annee", # définir key pour éviter conflits ) - # Apply filters based on the selected values + # Appliquer les filtres sur les 
données if annee != valeur_par_defaut_annee: filtered_df = filtered_df[filtered_df["ANNEE"] == annee] - # Update milieu options based on filtered data + # Mettre à jour les options de milieux selon le filtre année milieu_options += sorted(filtered_df["TYPE_MILIEU"].unique().tolist()) milieu = st.selectbox( @@ -680,14 +676,14 @@ def french_format(x: int) -> str: key="topdechets_milieu", # définir key pour éviter conflits ) - # Apply milieu filter if selected + # Appliquer le filtre par milieu si une valeur est choisir if milieu != valeur_par_defaut_milieu: filtered_df = filtered_df[filtered_df["TYPE_MILIEU"] == milieu] - # Update lieu options based on filtered data + # Mettre à jour les options de lieu selon le milieu choisi lieu_options += sorted(filtered_df["TYPE_LIEU"].unique().tolist()) - # Lieu selection + # Selection du lieu lieu = st.selectbox( "Choisir un type de lieu :", options=lieu_options, @@ -695,12 +691,10 @@ def french_format(x: int) -> str: key="topdechets_lieu", ) - # Apply lieu filter if selected + # Appliquer filtre par lieu si choixi if lieu != valeur_par_defaut_lieu: filtered_df = filtered_df[filtered_df["TYPE_LIEU"] == lieu] - # The filtered_df now contains the data based on the selected filters - # Récupérer les index de collectes pour filtrer le dataframe nb_dechets # Filtrer les données sur les ID_RELEVES df_top_dechets = pd.merge( @@ -721,11 +715,13 @@ def french_format(x: int) -> str: if nb_collectes_int < 5: st.warning("⚠️ Moins de 5 ramassages dans la base de données") - # Ligne 1 : 3 cellules avec les indicateurs clés en haut de page + ### METRIQUES CLES EN HAUT DE PAGE + l1_col1, l1_col2 = st.columns(2) - # Pour avoir 3 cellules avec bordure, il faut nester un st.container dans chaque colonne (pas d'option bordure dans st.column) + # 1ère métrique : volume total de déchets collectés cell1 = l1_col1.container(border=True) + # Trick pour séparer les milliers cell1.metric("Nombre de déchets comptés", french_format(nb_total_dechets)) @@ 
-734,13 +730,13 @@ def french_format(x: int) -> str: cell2 = l1_col2.container(border=True) cell2.metric("Nombre de ramassages", nb_collec_top) - # Ligne 2 : graphique top déchets + ### GRAPHIQUE TOP DECHETS - # Filtration sur les type-regroupement selection dechets "GROUPE" uniquement + # Filtre sur les données au niveau "GROUPE" uniquement df_top_dechets = df_top_dechets[ df_top_dechets["type_regroupement"].isin(["GROUPE"]) ] - # Group by 'categorie', sum 'nb_dechet', et top 10 + # Grouper par catégorie et ne garder que le top10 déchets en nombre cumulé df_top10_dechets = ( df_top_dechets.groupby("categorie") .agg({"nb_dechet": "sum"}) @@ -827,11 +823,6 @@ def french_format(x: int) -> str: # Filtration sur le dechet top 10 sélectionné df_map_data = df_top_dechets[df_top_dechets["categorie"] == selected_dechet] - # # Création du DataFrame de travail pour la carte - # df_map_data = pd.merge( - # df_top_map, df_top_data_releves, on="ID_RELEVE", how="inner" - # ) - # Création de la carte centrée autour d'une localisation # Initialisation du zoom sur la carte if filtre_niveau == "Commune": @@ -849,6 +840,7 @@ def french_format(x: int) -> str: min_lon = df_map_data["LIEU_COORD_GPS_X"].min() max_lon = df_map_data["LIEU_COORD_GPS_X"].max() + # création de la carte avec Folium map_data = folium.Map( location=[(min_lat + max_lat) / 2, (min_lon + max_lon) / 2], zoom_start=zoom_admin, @@ -865,7 +857,7 @@ def french_format(x: int) -> str: else: radius = 0.001 - # Format the value with commas as thousands separators + # Formatter les valeurs avec séparateurs de miliers formatted_nb_dechet = locale.format_string( "%.0f", row["nb_dechet"], grouping=True ) @@ -887,7 +879,7 @@ def french_format(x: int) -> str: fill_color="#3186cc", ).add_to(map_data) - # Add a legend + # Ajout légende legend_html = """
str: key="secteurs_annee", ) - # Apply year filter if selected + # Appliquer le filtre if annee != valeur_par_defaut_annee: filtered_df = filtered_df[filtered_df["ANNEE"] == annee] - # Update milieu options based on filtered data + # Mettre à jour les valeurs des milieux selon l'année choisie milieu_options += sorted(filtered_df["TYPE_MILIEU"].unique().tolist()) - # Milieu selection + # Sélection du milieu milieu = st.selectbox( "Choisir un type de milieu :", options=milieu_options, - index=0, # Default to the first option (valeur_par_defaut_milieu) + index=0, # Valeur par défaut : valeur_par_defaut_milieu key="secteurs_milieu", ) - # Apply milieu filter if selected + # Appliquer le filtre par milieu if milieu != valeur_par_defaut_milieu: filtered_df = filtered_df[filtered_df["TYPE_MILIEU"] == milieu] - # Update lieu options based on filtered data + # Mettre à jour les valeurs des lieux selon le milieu choisi lieu_options += sorted(filtered_df["TYPE_LIEU"].unique().tolist()) - # Lieu selection + # Sélectionner ie lieu lieu = st.selectbox( "Choisir un type de lieu :", options=lieu_options, - index=0, # Default to the first option (valeur_par_defaut_lieu) + index=0, # VAleur par défaut : valeur_par_defaut_lieu key="secteurs_lieu", ) - # Apply lieu filter if selected + # Appliquer le filtre par lieu if lieu != valeur_par_defaut_lieu: filtered_df = filtered_df[filtered_df["TYPE_LIEU"] == lieu] - # The filtered_df now contains the data based on the selected filters - - # # Filtre par année - # selected_annee_onglet_3 = st.selectbox( - # "Choisir une année:", - # options=[valeur_par_defaut_annee] + annee_liste, - # key="année_select", - # ) - # if selected_annee_onglet_3 != valeur_par_defaut_annee: - # filtered_data_milieu = df_other[ - # df_other["ANNEE"] == selected_annee_onglet_3 - # ] - # else: - # filtered_data_milieu = df_other.copy() - - # ## Filtre par type de milieu - # # Initialiser la liste des lieux - # milieux_liste = [valeur_par_defaut_milieu] + sorted( - # 
list(filtered_data_milieu["TYPE_MILIEU"].unique()) - # ) - - # selected_type_milieu_onglet_3 = st.selectbox( - # "Choisir un type de milieu:", - # options=milieux_liste, - # key="type_milieu_select", - # ) - - # if selected_type_milieu_onglet_3 != valeur_par_defaut_milieu: - # filtered_data_lieu = filtered_data_milieu[ - # filtered_data_milieu["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - # ] - # else: - # filtered_data_lieu = filtered_data_milieu - - # ## Filtre par lieu - # # Initialiser la liste des lieux - # lieux_liste = [valeur_par_defaut_lieu] + sorted( - # list(filtered_data_lieu["TYPE_LIEU"].unique()) - # ) - - # selected_type_lieu_onglet_3 = st.selectbox( - # "Choisir un type de lieu:", - # options=lieux_liste, - # key="type_lieu_select", - # ) - - # if ( - # selected_annee_onglet_3 == valeur_par_defaut_annee - # and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu - # and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu - # ): - # df_filtered = df_other.copy() - # elif ( - # selected_type_milieu_onglet_3 == valeur_par_defaut_milieu - # and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu - # ): - # df_filtered = df_other[df_other["ANNEE"] == selected_annee_onglet_3].copy() - # elif ( - # selected_annee_onglet_3 == valeur_par_defaut_annee - # and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu - # and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu - # ): - # df_filtered = df_other[ - # df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3 - # ].copy() - # elif ( - # selected_annee_onglet_3 == valeur_par_defaut_annee - # and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu - # and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu - # ): - # df_filtered = df_other[ - # df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3 - # ].copy() - # elif ( - # selected_annee_onglet_3 == valeur_par_defaut_annee - # and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu - # and selected_type_milieu_onglet_3 != 
valeur_par_defaut_milieu - # ): - # df_filtered = df_other[ - # (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - # & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - # ].copy() - # elif ( - # selected_annee_onglet_3 != valeur_par_defaut_annee - # and selected_type_lieu_onglet_3 != valeur_par_defaut_lieu - # and selected_type_milieu_onglet_3 == valeur_par_defaut_milieu - # ): - # df_filtered = df_other[ - # (df_other["ANNEE"] == selected_annee_onglet_3) - # & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - # ].copy() - # elif ( - # selected_annee_onglet_3 != valeur_par_defaut_annee - # and selected_type_lieu_onglet_3 == valeur_par_defaut_lieu - # and selected_type_milieu_onglet_3 != valeur_par_defaut_milieu - # ): - # df_filtered = df_other[ - # (df_other["ANNEE"] == selected_annee_onglet_3) - # & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - # ].copy() - - # elif selected_type_lieu_onglet_3 == valeur_par_defaut_lieu: - # df_filtered = df_other[ - # (df_other["ANNEE"] == selected_annee_onglet_3) - # & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - # ].copy() - # else: - # df_filtered = df_other[ - # (df_other["ANNEE"] == selected_annee_onglet_3) - # & (df_other["TYPE_MILIEU"] == selected_type_milieu_onglet_3) - # & (df_other["TYPE_LIEU"] == selected_type_lieu_onglet_3) - # ].copy() - - # - # Filtration des données pour nb_dechets df_init = pd.merge(df_dechet_copy, filtered_df, on="ID_RELEVE", how="inner") @@ -1159,8 +1039,6 @@ def french_format(x: int) -> str: top_rep_df.columns = ["Responsabilité élargie producteur", "Nombre de déchets"] # Data pour le plot marque - - # Data pour le plot responsabilités marque_df = duckdb.query( ( "SELECT * " @@ -1204,7 +1082,7 @@ def french_format(x: int) -> str: nb_marques = marque_df["categorie"].nunique() collectes_marque = marque_df["ID_RELEVE"].nunique() - ### ANALYSE PAR SECTEUR + ### GRAPHIQUE PAR SECTEUR st.write("**Analyse par secteur économique** (relevés de 
niveau 4 uniquement)") # Message d'avertissement si le nombre de collectes est en dessous de 5 @@ -1236,33 +1114,6 @@ def french_format(x: int) -> str: ) # Ligne 2 : 3 cellules avec les indicateurs clés en bas de page - colors_map_secteur = { - "AGRICULTURE": "#156644", - "ALIMENTATION": "#F7D156", - "AMEUBLEMENT, DÉCORATION ET ÉQUIPEMENT DE LA MAISON": "#F79D65", - "AQUACULTURE": "#0067C2", - "BÂTIMENT, TRAVAUX ET MATÉRIAUX DE CONSTRUCTION": "#FF9900", - "CHASSE ET ARMEMENT": "#23A76F", - "COSMÉTIQUES, HYGIÈNE ET SOINS PERSONNELS": "#BF726B", - "DÉTERGENTS ET PRODUITS D'ENTRETIENS": "#506266", - "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", - "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", - "INDÉTERMINÉ": "#967EA1", - "INFORMATIQUE ET HIGHTECH": "#E351F7", - "JOUETS ET LOISIR": "#A64D79", - "MATÉRIEL ÉLECTRIQUE ET ÉLECTROMÉNAGER": "#AE05C3", - "MÉTALLURGIE": "#EC4773", - "PÊCHE": "#003463", - "PETROCHIMIE": "#0D0D0D", - "PHARMACEUTIQUE/PARAMÉDICAL": "#61BF5E", - "PLASTURGIE": "#05A2AD", - "TABAC": "#E9003F", - "TEXTILE ET HABILLEMENT": "#FA9EE5", - "TRAITEMENT DES EAUX": "#4AA6F7", - "TRANSPORT / AUTOMOBILE": "#6C2775", - "VAISSELLE À USAGE UNIQUE": "#732D3A", - "AUTRES SECTEURS": "#D9C190", - } fig_secteur = px.bar( top_secteur_df.tail(10).sort_values( @@ -1280,7 +1131,7 @@ def french_format(x: int) -> str: color_discrete_map=colors_map_secteur, text_auto=True, ) - # add log scale to x axis + # Passage de l'absisse en LOG pour meilleure lecture fig_secteur.update_layout(xaxis_type="log") fig_secteur.update_traces( texttemplate="%{value:,.0f}", @@ -1319,7 +1170,7 @@ def french_format(x: int) -> str: "⚠️ Aucune donnée à afficher par secteur (nombre de ramassages trop faible)" ) - ### ANALYSE PAR FILIERE REP + ### GRAPHIQUE A BARRE PAR FILIERE REP st.write( "**Analyse par filière de Responsabilité Élargie du Producteur** (relevés de niveau 4 uniquement)" @@ -1401,7 +1252,7 @@ def french_format(x: int) -> str: "⚠️ Aucune donnée à afficher par filière REP 
(nombre de ramassages trop faible)" ) - ### ANALYSES PAR MARQUE + ### GRAPHIQUE PAR MARQUE st.write("**Analyse par marque** (relevés de niveaux 2 à 4)") @@ -1447,7 +1298,6 @@ def french_format(x: int) -> str: text_auto=True, ) - # add log scale to x axis fig_marque.update_layout( # xaxis_type="log", # Pas besoin d'échelle log ici height=700, diff --git a/dashboards/app/pages/ongletdata_colormap_materiaux.json b/dashboards/app/pages/ongletdata_colormap_materiaux.json new file mode 100644 index 0000000..0382554 --- /dev/null +++ b/dashboards/app/pages/ongletdata_colormap_materiaux.json @@ -0,0 +1 @@ +{"Textile": "#C384B1", "Papier": "#CAA674", "Metal": "#A0A0A0", "Verre": "#3DCE89", "Autre": "#F3B900", "Plastique": "#48BEF0", "Caoutchouc": "#364E74", "Bois": "#673C11", "Papier/Carton": "#CAA674", "M\u00e9tal": "#A0A0A0", "Verre/C\u00e9ramique": "#3DCE89"} \ No newline at end of file diff --git a/dashboards/app/pages/ongletdata_colormap_secteurs.json b/dashboards/app/pages/ongletdata_colormap_secteurs.json new file mode 100644 index 0000000..5560bbf --- /dev/null +++ b/dashboards/app/pages/ongletdata_colormap_secteurs.json @@ -0,0 +1 @@ +{"AGRICULTURE": "#156644", "ALIMENTATION": "#F7D156", "AMEUBLEMENT, D\u00c9CORATION ET \u00c9QUIPEMENT DE LA MAISON": "#F79D65", "AQUACULTURE": "#0067C2", "B\u00c2TIMENT, TRAVAUX ET MAT\u00c9RIAUX DE CONSTRUCTION": "#FF9900", "CHASSE ET ARMEMENT": "#23A76F", "COSM\u00c9TIQUES, HYGI\u00c8NE ET SOINS PERSONNELS": "#BF726B", "D\u00c9TERGENTS ET PRODUITS D'ENTRETIENS": "#506266", "EMBALLAGE INDUSTRIEL ET COLIS": "#754B30", "GRAPHIQUE ET PAPETERIE ET FOURNITURES DE BUREAU": "#EFEFEF", "IND\u00c9TERMIN\u00c9": "#967EA1", "INFORMATIQUE ET HIGHTECH": "#E351F7", "JOUETS ET LOISIR": "#A64D79", "MAT\u00c9RIEL \u00c9LECTRIQUE ET \u00c9LECTROM\u00c9NAGER": "#AE05C3", "M\u00c9TALLURGIE": "#EC4773", "P\u00caCHE": "#003463", "PETROCHIMIE": "#0D0D0D", "PHARMACEUTIQUE/PARAM\u00c9DICAL": "#61BF5E", "PLASTURGIE": "#05A2AD", "TABAC": "#E9003F", "TEXTILE 
ET HABILLEMENT": "#FA9EE5", "TRAITEMENT DES EAUX": "#4AA6F7", "TRANSPORT / AUTOMOBILE": "#6C2775", "VAISSELLE \u00c0 USAGE UNIQUE": "#732D3A", "AUTRES SECTEURS": "#D9C190"} \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 7d3ba3d..616dacc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -44,6 +44,46 @@ tests = ["attrs[tests-no-zope]", "zope-interface"] tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +[[package]] +name = "bcrypt" +version = "4.1.3" +description = "Modern password hashing for your software and your servers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", 
hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"}, + {file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"}, + {file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"}, + {file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"}, + {file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"}, + {file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"}, + {file = 
"bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"}, + {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"}, + {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"}, + {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"}, + {file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + [[package]] name = "blinker" version = "1.7.0" @@ -352,6 +392,20 @@ files = [ [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "extra-streamlit-components" +version = "0.1.71" +description = "An all-in-one place, to find complex or just natively unavailable components on streamlit." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "extra_streamlit_components-0.1.71-py3-none-any.whl", hash = "sha256:c8e6f98446adecd3002756362e50d0669693b7673afaa89cebfced6415cc6bd3"}, + {file = "extra_streamlit_components-0.1.71.tar.gz", hash = "sha256:d18314cf2ed009f95641882b50aa3bdb11b6a0eb6403fb43dbc8af1722419617"}, +] + +[package.dependencies] +streamlit = ">=1.18.0" + [[package]] name = "filelock" version = "3.13.1" @@ -1102,6 +1156,23 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pyjwt" +version = "2.8.0" +description = "JSON Web Token implementation in Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, + {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, +] + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + [[package]] name = "pyproj" version = "3.6.1" @@ -1566,6 +1637,20 @@ files = [ {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, ] +[[package]] +name = "st-pages" +version = "0.4.5" +description = "An experimental version of Streamlit Multi-Page Apps" +optional = false +python-versions = ">=3.8, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*" +files = [ + {file = "st_pages-0.4.5-py3-none-any.whl", hash = "sha256:7cc0c9137bc2a3aba2c7918f76a9b220673b4344762544ebf5c5e56d16a6f360"}, + {file = "st_pages-0.4.5.tar.gz", hash = 
"sha256:0b95b2ae53e91f9922f2f254b356e1063981b5fcc89a48c4b89011806ccda465"}, +] + +[package.dependencies] +streamlit = ">=1.10.0" + [[package]] name = "statsmodels" version = "0.14.1" @@ -1654,6 +1739,24 @@ watchdog = {version = ">=2.1.5", markers = "platform_system != \"Darwin\""} [package.extras] snowflake = ["snowflake-connector-python (>=2.8.0)", "snowflake-snowpark-python (>=0.9.0)"] +[[package]] +name = "streamlit-authenticator" +version = "0.3.2" +description = "A secure authentication module to validate user credentials in a Streamlit application." +optional = false +python-versions = ">=3.6" +files = [ + {file = "streamlit-authenticator-0.3.2.tar.gz", hash = "sha256:f17a77d0394a45d6554a72d890cc270d9be5328eeb12958898bf0183e7321ed6"}, + {file = "streamlit_authenticator-0.3.2-py3-none-any.whl", hash = "sha256:0620768d01aa6c7bff4200f062effff333a8e0cfde9b5300a67ba878c51b0adc"}, +] + +[package.dependencies] +bcrypt = ">=3.1.7" +extra-streamlit-components = ">=0.1.70" +PyJWT = ">=2.3.0" +PyYAML = ">=5.3.1" +streamlit = ">=1.25.0" + [[package]] name = "tenacity" version = "8.2.3" @@ -1862,4 +1965,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "658c67e7dc4fb5c0f32fee6963b86666d2602730dfd998cb3aa7cd85de85dd44" +content-hash = "48851ceb7f8426b10ba5d87ce97d4cb7ee8cf455c61de64b1db4534b48435a2f" diff --git a/pyproject.toml b/pyproject.toml index fa0318f..30f37cb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,6 +22,8 @@ geopandas = "^0.14.3" folium = "^0.16.0" streamlit = "^1.32.2" plotly-express = "^0.4.1" +streamlit-authenticator = "^0.3.2" +st-pages = "^0.4.5" [tool.poetry.group.dev.dependencies] pre-commit = "^2.20.0" From 17dcc63bf9f6ec202d11476aad1d2386f483b4e6 Mon Sep 17 00:00:00 2001 From: Mendi33 Date: Tue, 9 Jul 2024 10:12:31 +0000 Subject: [PATCH 143/147] =?UTF-8?q?Ajout=20docstring=20de=20:=20def=20load?= =?UTF-8?q?=5Fdf=5Fevents=5Fclean()=20Chargement=20des=20donn=C3=A9es=20de?= 
=?UTF-8?q?s=20=C3=A9v=C3=A9nements=20=C3=A0=20venir?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 227 ----------------------------------------- 1 file changed, 227 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index 3e42583..e69de29 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -1,227 +0,0 @@ -from pathlib import Path - -import pandas as pd -import streamlit as st -import streamlit_authenticator as stauth -import yaml -from st_pages import Page, show_pages -from yaml.loader import SafeLoader - -# Configuration de la page -st.set_page_config( - layout="wide", - page_title="Dashboard Zéro Déchet Sauvage", - page_icon=":dolphin:", - menu_items={ - "About": "https://www.zero-dechet-sauvage.org/", - }, -) - -# load and apply CSS styles -def load_css(file_name: str) -> None: - with Path(file_name).open() as f: - st.markdown(f"", unsafe_allow_html=True) - - -# Login -p_cred = Path(".credentials.yml") -with p_cred.open() as file: - config = yaml.load(file, Loader=SafeLoader) - -authenticator = stauth.Authenticate( - config["credentials"], - config["cookie"]["name"], - config["cookie"]["key"], - config["cookie"]["expiry_days"], - config["pre-authorized"], -) -authenticator.login( - fields={ - "Form name": "Connexion", - "Username": "Identifiant", - "Password": "Mot de passe", - "Login": "Connexion", - }, -) - -if st.session_state["authentication_status"]: - show_pages( - [ - Page("home.py", "Accueil", "🏠"), - ], - ) - - # Load and apply the CSS file at the start of your app - # local debug - load_css("style.css") - - st.markdown( - """ - # Bienvenue 👋 - #### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! 
- """, - ) - - st.markdown("""# À propos""") - - # Chargement des données et filtre géographique à l'arrivée sur le dashboard - # Table des volumes par matériaux - @st.cache_data - def load_df_other() -> pd.DataFrame: - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_zds_enriched.csv", - ) - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE - # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] - return df - - # Table des structures - @st.cache_data - def load_structures() -> pd.DataFrame: - df = pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/4-" - "onglet-structures/Exploration_visuali" - "sation/data/structures_export_cleaned.csv", - index_col=0, - ) - # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE - # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) - df["DEP_CODE_NOM"] = df["dep"] + " - " + df["departement"] - df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["COMMUNE"] - df.columns = [c.upper() for c in df.columns] - return df - - # Table du nb de déchets - @st.cache_data - def load_df_nb_dechet() -> pd.DataFrame: - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/data_releve_nb_dechet.csv", - ) - - @st.cache_data - # Définition d'une fonction pour charger les evenements à venir - def load_df_events_clean() -> pd.DataFrame: - return pd.read_csv( - "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" - "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" - "sation/data/export_events_cleaned.csv", - ) - - # Appel des fonctions pour charger 
les données - df_other = load_df_other() - df_structures = load_structures() - df_events = load_df_events_clean() - - # Création du filtre par niveau géographique : correspondance labels et variables - df_nb_dechets = load_df_nb_dechet() - - # Création du filtre par niveau géographique : correspondance labels et variables du df - niveaux_admin_dict = { - "Région": "REGION", - "Département": "DEP_CODE_NOM", - "EPCI": "LIBEPCI", - "Commune": "COMMUNE_CODE_NOM", - } - - # 1ère étape : sélection du niveau administratif concerné (région, dép...) - # Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment - # Récupérer les index pour conserver la valeur des filtres au changement de pages - # Filtre niveau administratif - niveau_admin = st.session_state.get("niveau_admin", None) - index_admin = st.session_state.get("index_admin", None) - # Filtre collectivité - collectivite = st.session_state.get("collectivite", None) - index_collec = st.session_state.get("index_collec", None) - - # Initialiser la selectbox avec l'index récupéré - select_niveauadmin = st.selectbox( - "Niveau administratif : ", - niveaux_admin_dict.keys(), - index=index_admin, - placeholder="Choisir une option", - ) - - if select_niveauadmin is not None: - # Filtrer la liste des collectivités en fonction du niveau admin - liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] - liste_collectivites = liste_collectivites.sort_values().unique() - - # 2ème filtre : sélection de la collectivité concernée - select_collectivite = st.selectbox( - "Collectivité : ", - liste_collectivites, - index=index_collec, - placeholder="Choisir une collectivité", - ) - - button_disabled = not select_niveauadmin or not select_collectivite - if st.button("Enregistrer la sélection", disabled=button_disabled): - # Enregistrer les valeurs sélectionnées dans le session.state - st.session_state["niveau_admin"] = select_niveauadmin - st.session_state["index_admin"] = 
list(niveaux_admin_dict.keys()).index( - select_niveauadmin, - ) - - st.session_state["collectivite"] = select_collectivite - st.session_state["index_collec"] = list(liste_collectivites).index( - select_collectivite, - ) - - # Afficher la collectivité sélectionnée - st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") - show_pages( - [ - Page("home.py", "Accueil", "🏠"), - Page("pages/structures.py", "Structures", "🔭"), - Page("pages/actions.py", "Actions", "👊"), - Page("pages/data.py", "Data", "🔍"), - Page("pages/hotspots.py", "Hotspots", "🔥"), - ], - ) - - # Filtrer et enregistrer le DataFrame dans un session state pour la suite - colonne_filtre = niveaux_admin_dict[select_niveauadmin] - df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] - st.session_state["df_other_filtre"] = df_other_filtre - - # Filtrer dataframe structures et enregistrer dans le session.state - df_structures_filtre = df_structures[ - df_structures[colonne_filtre] == select_collectivite - ] - st.session_state["structures_filtre"] = df_structures_filtre - st.session_state["structures"] = df_structures - st.session_state["events"] = df_events - - # Filtrer et enregistrer le dataframe nb_dechets dans session.State - # Récuperer la liste des relevés - id_releves = df_other_filtre["ID_RELEVE"].unique() - # Filtrer df_nb_dechets sur la liste des relevés - st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ - df_nb_dechets["ID_RELEVE"].isin(id_releves) - ] - - # Afficher le nombre de relevés disponibles - nb_releves = len(st.session_state["df_other_filtre"]) - st.write( - f"{nb_releves} relevés de collecte sont disponibles \ - pour l'analyse sur votre territoire.", - ) - - authenticator.logout() -elif st.session_state["authentication_status"] is False: - st.error("Mauvais identifiants ou mot de passe.") -elif st.session_state["authentication_status"] is None: - st.warning("Veuillez entrer votre identifiant et mot de passe") - - show_pages( - [ 
- Page("home.py", "Home", "🏠 "), - Page("pages/register.py", "S'enregistrer", "🚀"), - ], - ) From 1795f72d64400e87494c2651aec0df8b9dbc4d25 Mon Sep 17 00:00:00 2001 From: Mendi33 Date: Tue, 9 Jul 2024 13:59:01 +0000 Subject: [PATCH 144/147] =?UTF-8?q?Correction=20erreur=20dernier=20commit?= =?UTF-8?q?=20R=C3=A9solution=20des=20conflits=20de=20merge?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/app/home.py | 236 +++++++ poetry.lock | 1427 ++++++++++++++++++++++++++++++++-------- pyproject.toml | 4 +- 3 files changed, 1375 insertions(+), 292 deletions(-) diff --git a/dashboards/app/home.py b/dashboards/app/home.py index e69de29..3e4743c 100644 --- a/dashboards/app/home.py +++ b/dashboards/app/home.py @@ -0,0 +1,236 @@ +from pathlib import Path + +import pandas as pd +import streamlit as st +import streamlit_authenticator as stauth +import yaml +from st_pages import Page, show_pages +from yaml.loader import SafeLoader + +# Configuration de la page +st.set_page_config( + layout="wide", + page_title="Dashboard Zéro Déchet Sauvage", + page_icon=":dolphin:", + menu_items={ + "About": "https://www.zero-dechet-sauvage.org/", + }, +) + +# load and apply CSS styles +def load_css(file_name: str) -> None: + with Path(file_name).open() as f: + st.markdown(f"", unsafe_allow_html=True) + + +# Login +p_cred = Path(".credentials.yml") +with p_cred.open() as file: + config = yaml.load(file, Loader=SafeLoader) + +authenticator = stauth.Authenticate( + config["credentials"], + config["cookie"]["name"], + config["cookie"]["key"], + config["cookie"]["expiry_days"], + config["pre-authorized"], +) +authenticator.login( + fields={ + "Form name": "Connexion", + "Username": "Identifiant", + "Password": "Mot de passe", + "Login": "Connexion", + }, +) + +if st.session_state["authentication_status"]: + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + ], + ) + + # Load and apply the CSS file at the start of your app + # local debug + 
load_css("style.css") + + st.markdown( + """ + # Bienvenue 👋 + #### Visualiser les collectes de déchets qui ont lieu sur votre territoire ! + """, + ) + + st.markdown("""# À propos""") + + # Chargement des données et filtre géographique à l'arrivée sur le dashboard + # Table des volumes par matériaux + @st.cache_data + def load_df_other() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_zds_enriched.csv", + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["DEP"] + " - " + df["DEPARTEMENT"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["commune"] + return df + + # Table des structures + @st.cache_data + def load_structures() -> pd.DataFrame: + df = pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/4-" + "onglet-structures/Exploration_visuali" + "sation/data/structures_export_cleaned.csv", + index_col=0, + ) + # Ajout des colonnes DEP_CODE_NOM et COMMUNE_CODE_NOM qui concatenent le numéro INSEE + # et le nom de l'entité géographique (ex : 13 - Bouches du Rhône) + df["DEP_CODE_NOM"] = df["dep"] + " - " + df["departement"] + df["COMMUNE_CODE_NOM"] = df["INSEE_COM"] + " - " + df["COMMUNE"] + df.columns = [c.upper() for c in df.columns] + return df + + # Table du nb de déchets + @st.cache_data + def load_df_nb_dechet() -> pd.DataFrame: + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/data_releve_nb_dechet.csv", + ) + + @st.cache_data + # Définition d'une fonction pour charger les evenements à venir + def load_df_events_clean() -> pd.DataFrame: + """Chargement du dataset 'export_event_cleaned.csv' + + Les 'évenements à venir' sont dans un autre 
dataset que data_zds + La liste des événements RAW est dans #2/explo/data/export_events_14032024.xlsx + Le script de nettoyage est #2/explo/cleaning_events.py + + Returns: + pd.DataFrame: DF des événements à venir nettoyé + """ + return pd.read_csv( + "https://github.com/dataforgoodfr/12_zero_dechet_sauvage/raw/2-" + "nettoyage-et-augmentation-des-donn%C3%A9es/Exploration_visuali" + "sation/data/export_events_cleaned.csv", + ) + + # Appel des fonctions pour charger les données + df_other = load_df_other() + df_structures = load_structures() + df_events = load_df_events_clean() + + # Création du filtre par niveau géographique : correspondance labels et variables + df_nb_dechets = load_df_nb_dechet() + + # Création du filtre par niveau géographique : correspondance labels et variables du df + niveaux_admin_dict = { + "Région": "REGION", + "Département": "DEP_CODE_NOM", + "EPCI": "LIBEPCI", + "Commune": "COMMUNE_CODE_NOM", + } + + # 1ère étape : sélection du niveau administratif concerné (région, dép...) 
+ # Si déjà saisi précédemment, initialiser le filtre avec les valeurs entrées précédemment + # Récupérer les index pour conserver la valeur des filtres au changement de pages + # Filtre niveau administratif + niveau_admin = st.session_state.get("niveau_admin", None) + index_admin = st.session_state.get("index_admin", None) + # Filtre collectivité + collectivite = st.session_state.get("collectivite", None) + index_collec = st.session_state.get("index_collec", None) + + # Initialiser la selectbox avec l'index récupéré + select_niveauadmin = st.selectbox( + "Niveau administratif : ", + niveaux_admin_dict.keys(), + index=index_admin, + placeholder="Choisir une option", + ) + + if select_niveauadmin is not None: + # Filtrer la liste des collectivités en fonction du niveau admin + liste_collectivites = df_other[niveaux_admin_dict[select_niveauadmin]] + liste_collectivites = liste_collectivites.sort_values().unique() + + # 2ème filtre : sélection de la collectivité concernée + select_collectivite = st.selectbox( + "Collectivité : ", + liste_collectivites, + index=index_collec, + placeholder="Choisir une collectivité", + ) + + button_disabled = not select_niveauadmin or not select_collectivite + if st.button("Enregistrer la sélection", disabled=button_disabled): + # Enregistrer les valeurs sélectionnées dans le session.state + st.session_state["niveau_admin"] = select_niveauadmin + st.session_state["index_admin"] = list(niveaux_admin_dict.keys()).index( + select_niveauadmin, + ) + + st.session_state["collectivite"] = select_collectivite + st.session_state["index_collec"] = list(liste_collectivites).index( + select_collectivite, + ) + + # Afficher la collectivité sélectionnée + st.write(f"Vous avez sélectionné : {select_niveauadmin} {select_collectivite}.") + show_pages( + [ + Page("home.py", "Accueil", "🏠"), + Page("pages/structures.py", "Structures", "🔭"), + Page("pages/actions.py", "Actions", "👊"), + Page("pages/data.py", "Data", "🔍"), + Page("pages/hotspots.py", 
"Hotspots", "🔥"), + ], + ) + + # Filtrer et enregistrer le DataFrame dans un session state pour la suite + colonne_filtre = niveaux_admin_dict[select_niveauadmin] + df_other_filtre = df_other[df_other[colonne_filtre] == select_collectivite] + st.session_state["df_other_filtre"] = df_other_filtre + + # Filtrer dataframe structures et enregistrer dans le session.state + df_structures_filtre = df_structures[ + df_structures[colonne_filtre] == select_collectivite + ] + st.session_state["structures_filtre"] = df_structures_filtre + st.session_state["structures"] = df_structures + st.session_state["events"] = df_events + + # Filtrer et enregistrer le dataframe nb_dechets dans session.State + # Récuperer la liste des relevés + id_releves = df_other_filtre["ID_RELEVE"].unique() + # Filtrer df_nb_dechets sur la liste des relevés + st.session_state["df_nb_dechets_filtre"] = df_nb_dechets[ + df_nb_dechets["ID_RELEVE"].isin(id_releves) + ] + + # Afficher le nombre de relevés disponibles + nb_releves = len(st.session_state["df_other_filtre"]) + st.write( + f"{nb_releves} relevés de collecte sont disponibles \ + pour l'analyse sur votre territoire.", + ) + + authenticator.logout() +elif st.session_state["authentication_status"] is False: + st.error("Mauvais identifiants ou mot de passe.") +elif st.session_state["authentication_status"] is None: + st.warning("Veuillez entrer votre identifiant et mot de passe") + + show_pages( + [ + Page("home.py", "Home", "🏠 "), + Page("pages/register.py", "S'enregistrer", "🚀"), + ], + ) diff --git a/poetry.lock b/poetry.lock index 9bcff55..616dacc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,10 +1,93 @@ -# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. + +[[package]] +name = "altair" +version = "5.3.0" +description = "Vega-Altair: A declarative statistical visualization library for Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "altair-5.3.0-py3-none-any.whl", hash = "sha256:7084a1dab4d83c5e7e5246b92dc1b4451a6c68fd057f3716ee9d315c8980e59a"}, + {file = "altair-5.3.0.tar.gz", hash = "sha256:5a268b1a0983b23d8f9129f819f956174aa7aea2719ed55a52eba9979b9f6675"}, +] + +[package.dependencies] +jinja2 = "*" +jsonschema = ">=3.0" +numpy = "*" +packaging = "*" +pandas = ">=0.25" +toolz = "*" +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +all = ["altair-tiles (>=0.3.0)", "anywidget (>=0.9.0)", "pyarrow (>=11)", "vega-datasets (>=0.9.0)", "vegafusion[embed] (>=1.6.6)", "vl-convert-python (>=1.3.0)"] +dev = ["geopandas", "hatch", "ipython", "m2r", "mypy", "pandas-stubs", "pytest", "pytest-cov", "ruff (>=0.3.0)", "types-jsonschema", "types-setuptools"] +doc = ["docutils", "jinja2", "myst-parser", "numpydoc", "pillow (>=9,<10)", "pydata-sphinx-theme (>=0.14.1)", "scipy", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinxext-altair"] + +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + +[[package]] +name = "bcrypt" +version = "4.1.3" +description = "Modern password hashing for your software 
and your servers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"}, + {file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"}, + {file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"}, + {file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"}, + {file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"}, + {file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"}, + {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"}, + {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"}, + {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"}, + {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"}, + {file = 
"bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] [[package]] name = "blinker" version = "1.7.0" description = "Fast, simple object-to-object and broadcast signaling" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -12,11 +95,24 @@ files = [ {file = "blinker-1.7.0.tar.gz", hash = "sha256:e6820ff6fa4e4d1d8e2747c2283749c3f547e4fee112b98555cdcdae32996182"}, ] +[[package]] +name = "branca" +version = "0.7.1" +description = "Generate complex HTML+JS pages with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "branca-0.7.1-py3-none-any.whl", hash = "sha256:70515944ed2d1ed2784c552508df58037ca19402a8a1069d57f9113e3e012f51"}, + {file = "branca-0.7.1.tar.gz", hash = "sha256:e6b6f37a37bc0abffd960c68c045a7fe025d628eff87fedf6ab6ca814812110c"}, +] + +[package.dependencies] +jinja2 = ">=3" + [[package]] name = "cachetools" version = "5.3.2" description = "Extensible memoizing collections and decorators" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -28,7 +124,6 @@ files = [ name = "certifi" version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -40,7 +135,6 @@ files = [ name = "cfgv" version = "3.4.0" description = "Validate configuration and produce human readable error messages." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -52,7 +146,6 @@ files = [ name = "chardet" version = "5.2.0" description = "Universal encoding detector for Python 3" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -64,7 +157,6 @@ files = [ name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -164,7 +256,6 @@ files = [ name = "click" version = "8.1.7" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -176,92 +267,54 @@ files = [ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -category = "main" +name = "click-plugins" +version = "1.1.1" +description = "An extension module for click to enable registering CLI commands via setuptools entry-points." optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "dash" -version = "2.16.1" -description = "A Python framework for building reactive web-apps. Developed by Plotly." 
-category = "main" -optional = false -python-versions = ">=3.8" +python-versions = "*" files = [ - {file = "dash-2.16.1-py3-none-any.whl", hash = "sha256:8a9d2a618e415113c0b2a4d25d5dc4df5cb921f733b33dde75559db2316b1df1"}, - {file = "dash-2.16.1.tar.gz", hash = "sha256:b2871d6b8d4c9dfd0a64f89f22d001c93292910b41d92d9ff2bb424a28283976"}, + {file = "click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b"}, + {file = "click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"}, ] [package.dependencies] -dash-core-components = "2.0.0" -dash-html-components = "2.0.0" -dash-table = "5.0.0" -Flask = ">=1.0.4,<3.1" -importlib-metadata = "*" -nest-asyncio = "*" -plotly = ">=5.0.0" -requests = "*" -retrying = "*" -setuptools = "*" -typing-extensions = ">=4.1.1" -Werkzeug = "<3.1" +click = ">=4.0" [package.extras] -celery = ["celery[redis] (>=5.1.2)", "redis (>=3.5.3)"] -ci = ["black (==22.3.0)", "dash-dangerously-set-inner-html", "dash-flow-example (==0.0.5)", "flake8 (==7.0.0)", "flaky (==3.7.0)", "flask-talisman (==1.0.0)", "jupyterlab (<4.0.0)", "mimesis (<=11.1.0)", "mock (==4.0.3)", "numpy (<=1.26.3)", "openpyxl", "orjson (==3.9.12)", "pandas (>=1.4.0)", "pyarrow", "pylint (==3.0.3)", "pytest-mock", "pytest-rerunfailures", "pytest-sugar (==0.9.6)", "pyzmq (==25.1.2)", "xlrd (>=2.0.1)"] -compress = ["flask-compress"] -dev = ["PyYAML (>=5.4.1)", "coloredlogs (>=15.0.1)", "fire (>=0.4.0)"] -diskcache = ["diskcache (>=5.2.1)", "multiprocess (>=0.70.12)", "psutil (>=5.8.0)"] -testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", "dash-testing-stub (>=0.0.2)", "lxml (>=4.6.2)", "multiprocess (>=0.70.12)", "percy (>=2.0.2)", "psutil (>=5.8.0)", "pytest (>=6.0.2)", "requests[security] (>=2.21.0)", "selenium (>=3.141.0,<=4.2.0)", "waitress (>=1.4.4)"] +dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] [[package]] -name = "dash-core-components" 
-version = "2.0.0" -description = "Core component suite for Dash" -category = "main" +name = "cligj" +version = "0.7.2" +description = "Click params for commmand line interfaces to GeoJSON" optional = false -python-versions = "*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4" files = [ - {file = "dash_core_components-2.0.0-py3-none-any.whl", hash = "sha256:52b8e8cce13b18d0802ee3acbc5e888cb1248a04968f962d63d070400af2e346"}, - {file = "dash_core_components-2.0.0.tar.gz", hash = "sha256:c6733874af975e552f95a1398a16c2ee7df14ce43fa60bb3718a3c6e0b63ffee"}, + {file = "cligj-0.7.2-py3-none-any.whl", hash = "sha256:c1ca117dbce1fe20a5809dc96f01e1c2840f6dcc939b3ddbb1111bf330ba82df"}, + {file = "cligj-0.7.2.tar.gz", hash = "sha256:a4bc13d623356b373c2c27c53dbd9c68cae5d526270bfa71f6c6fa69669c6b27"}, ] -[[package]] -name = "dash-html-components" -version = "2.0.0" -description = "Vanilla HTML components for Dash" -category = "main" -optional = false -python-versions = "*" -files = [ - {file = "dash_html_components-2.0.0-py3-none-any.whl", hash = "sha256:b42cc903713c9706af03b3f2548bda4be7307a7cf89b7d6eae3da872717d1b63"}, - {file = "dash_html_components-2.0.0.tar.gz", hash = "sha256:8703a601080f02619a6390998e0b3da4a5daabe97a1fd7a9cebc09d015f26e50"}, -] +[package.dependencies] +click = ">=4.0" + +[package.extras] +test = ["pytest-cov"] [[package]] -name = "dash-table" -version = "5.0.0" -description = "Dash table" -category = "main" +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
optional = false -python-versions = "*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ - {file = "dash_table-5.0.0-py3-none-any.whl", hash = "sha256:19036fa352bb1c11baf38068ec62d172f0515f73ca3276c79dee49b95ddc16c9"}, - {file = "dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308"}, + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] [[package]] name = "distlib" version = "0.3.8" description = "Distribution utilities" -category = "dev" optional = false python-versions = "*" files = [ @@ -271,66 +324,64 @@ files = [ [[package]] name = "duckdb" -version = "0.10.1" +version = "0.10.0" description = "DuckDB in-process database" -category = "main" optional = false python-versions = ">=3.7.0" files = [ - {file = "duckdb-0.10.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0ac172788e3d8e410e009e3699016a4d7f17b4c7cde20f98856fca1fea79d247"}, - {file = "duckdb-0.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f754c20d3b963574da58b0d22029681b79c63f2e32060f10b687f41b7bba54d7"}, - {file = "duckdb-0.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c68b1ef88b8cce185381ec69f437d20059c30623375bab41ac07a1104acdb57"}, - {file = "duckdb-0.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f566f615278844ea240c9a3497c0ef201331628f78e0f9f4d64f72f82210e750"}, - {file = "duckdb-0.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67d2996c3372a0f7d8f41f1c49e00ecdb26f83cdd9132b76730224ad68b1f1e3"}, - {file = "duckdb-0.10.1-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c3b3a18a58eebabb426beafc2f7da01d59805d660fc909e5e143b6db04d881a"}, - {file = 
"duckdb-0.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:343795d13ec3d8cd06c250225a05fd3c348c3ed49cccdde01addd46cb50f3559"}, - {file = "duckdb-0.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:33f99c2e9e4060464673912312b4ec91060d66638756592c9484c62824ff4e85"}, - {file = "duckdb-0.10.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fdbe4173729043b2fd949be83135b035820bb2faf64648500563b16f3f6f02ee"}, - {file = "duckdb-0.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f90738310a76bd1618acbc7345175582d36b6907cb0ed07841a3d800dea189d6"}, - {file = "duckdb-0.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d14d00560832592cbac2817847b649bd1d573f125d064518afb6eec5b02e15a"}, - {file = "duckdb-0.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11c0bf253c96079c6139e8a0880300d80f4dc9f21a8c5c239d2ebc060b227d46"}, - {file = "duckdb-0.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcc60833bb1a1fb2c33b052cf793fef48f681c565d982acff6ac7a86369794da"}, - {file = "duckdb-0.10.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:88cdc0c2501dd7a65b1df2a76d7624b93d9b6d27febd2ee80b7e5643a0b40bcb"}, - {file = "duckdb-0.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:698a8d1d48b150d344d8aa6dbc30a22ea30fb14ff2b15c90004fc9fcb0b3a3e9"}, - {file = "duckdb-0.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:b450aa2b3e0eb1fc0f7ad276bd1e4a5a03b1a4def6c45366af17557de2cafbdf"}, - {file = "duckdb-0.10.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:40dd55ea9c31abc69e5a8299f16c877e0b1950fd9a311c117efb4dd3c0dc8458"}, - {file = "duckdb-0.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7c1b3538bb9c2b49f48b26f092444525b22186efa4e77ba070603ed4a348a66"}, - {file = "duckdb-0.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bce024b69bae426b0739c470803f7b44261bdc0c0700ea7c41dff5f2d70ca4f3"}, - {file = 
"duckdb-0.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52af2a078340b2e1b57958477ebc1be07786d3ad5796777e87d4f453e0477b4c"}, - {file = "duckdb-0.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3c52b08c773e52484542300339ebf295e3c9b12d5d7d49b2567e252c16205a7"}, - {file = "duckdb-0.10.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:097aa9b6d5c9f5d3ed8c35b16020a67731d04befc35f6b89ccb5db9d5f1489c4"}, - {file = "duckdb-0.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b5a14a80ad09d65c270d16761b04ea6b074811cdfde6b5e4db1a8b0184125d1b"}, - {file = "duckdb-0.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fb98dbbdbf8048b07223dc6e7401333bb4e83681dde4cded2d239051ea102b5"}, - {file = "duckdb-0.10.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28857b0d595c229827cc3631ae9b74ff52d11614435aa715e09d8629d2e1b609"}, - {file = "duckdb-0.10.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d85645136fc25026978b5db81869e8a120cfb60e1645a29a0f6dd155be9e59e"}, - {file = "duckdb-0.10.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e2e10582db74b99051e718279c1be204c98a63a5b6aa4e09226b7249e414146"}, - {file = "duckdb-0.10.1-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6a88358d86a8ce689fdd4136514aebedf958e910361156a0bb0e53dc3c55f7d"}, - {file = "duckdb-0.10.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b025afa30fcdcede094386e7c519e6964d26de5ad95f4e04a2a0a713676d4465"}, - {file = "duckdb-0.10.1-cp37-cp37m-win_amd64.whl", hash = "sha256:910be5005de7427c5231a7200027e0adb951e048c612b895340effcd3e660d5a"}, - {file = "duckdb-0.10.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:13d81752763f14203a53981f32bd09731900eb6fda4048fbc532eae5e7bf30e5"}, - {file = "duckdb-0.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:21858225b8a5c5dead128f62e4e88facdcbfdce098e18cbcd86a6cd8f48fb2b3"}, - {file = "duckdb-0.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8bf46d55685906729998eca70ee751934e0425d86863148e658277526c54282e"}, - {file = "duckdb-0.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f786b4402b9c31461ea0520d919e2166df4f9e6e21fd3c7bb0035fa985b5dfe"}, - {file = "duckdb-0.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32e52c6e939a4bada220803e6bde6fc0ce870da5662a33cabdd3be14824183a6"}, - {file = "duckdb-0.10.1-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c563b565ea68cfebe9c4078646503b3d38930218f9c3c278277d58952873771"}, - {file = "duckdb-0.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af8382280f24273a535e08b80e9383ad739c66e22855ce68716dfbaeaf8910b9"}, - {file = "duckdb-0.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:2e6e01e2499e07873b09316bf4d6808f712c57034fa24c255565c4f92386e8e3"}, - {file = "duckdb-0.10.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7791a0aa2cea972a612d31d4a289c81c5d00181328ed4f7642907f68f8b1fb9f"}, - {file = "duckdb-0.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1ace20383fb0ba06229e060a6bb0bcfd48a4582a02e43f05991720504508eb59"}, - {file = "duckdb-0.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5aad3e085c33253c689205b5ea3c5d9d54117c1249276c90d495cb85d9adce76"}, - {file = "duckdb-0.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa08173f68e678793dfe6aab6490ac753204ca7935beb8dbde778dbe593552d8"}, - {file = "duckdb-0.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:525efad4e6caff80d0f6a51d466470839146e3880da36d4544fee7ff842e7e20"}, - {file = "duckdb-0.10.1-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48d84577216010ee407913bad9dc47af4cbc65e479c91e130f7bd909a32caefe"}, - {file = 
"duckdb-0.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6e65f00294c3b8576ae651e91e732ea1cefc4aada89c307fb02f49231fd11e1f"}, - {file = "duckdb-0.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:30aa9dbbfc1f9607249fc148af9e6d6fd253fdc2f4c9924d4957d6a535558b4f"}, - {file = "duckdb-0.10.1.tar.gz", hash = "sha256:0d5b6daa9bb54a635e371798994caa08f26d2f145ebcbc989e16b0a0104e84fb"}, + {file = "duckdb-0.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bd0ffb3fddef0f72a150e4d76e10942a84a1a0447d10907df1621b90d6668060"}, + {file = "duckdb-0.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f3d709d5c7c1a12b5e10d0b05fa916c670cd2b50178e3696faa0cc16048a1745"}, + {file = "duckdb-0.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9114aa22ec5d591a20ce5184be90f49d8e5b5348ceaab21e102c54560d07a5f8"}, + {file = "duckdb-0.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77a37877efadf39caf7cadde0f430fedf762751b9c54750c821e2f1316705a21"}, + {file = "duckdb-0.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87cbc9e1d9c3fc9f14307bea757f99f15f46843c0ab13a6061354410824ed41f"}, + {file = "duckdb-0.10.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f0bfec79fed387201550517d325dff4fad2705020bc139d936cab08b9e845662"}, + {file = "duckdb-0.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c5622134d2d9796b15e09de810e450859d4beb46d9b861357ec9ae40a61b775c"}, + {file = "duckdb-0.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:089ee8e831ccaef1b73fc89c43b661567175eed0115454880bafed5e35cda702"}, + {file = "duckdb-0.10.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a05af63747f1d7021995f0811c333dee7316cec3b06c0d3e4741b9bdb678dd21"}, + {file = "duckdb-0.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:072d6eba5d8a59e0069a8b5b4252fed8a21f9fe3f85a9129d186a39b3d0aea03"}, + {file = "duckdb-0.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:a77b85668f59b919042832e4659538337f1c7f197123076c5311f1c9cf077df7"}, + {file = "duckdb-0.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96a666f1d2da65d03199a977aec246920920a5ea1da76b70ae02bd4fb1ffc48c"}, + {file = "duckdb-0.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ec76a4262b783628d26612d184834852d9c92fb203e91af789100c17e3d7173"}, + {file = "duckdb-0.10.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:009dd9d2cdbd3b061a9efbdfc79f2d1a8377bcf49f1e5f430138621f8c083a6c"}, + {file = "duckdb-0.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:878f06766088090dad4a2e5ee0081555242b2e8dcb29415ecc97e388cf0cf8d8"}, + {file = "duckdb-0.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:713ff0a1fb63a6d60f454acf67f31656549fb5d63f21ac68314e4f522daa1a89"}, + {file = "duckdb-0.10.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9c0ee450dfedfb52dd4957244e31820feef17228da31af6d052979450a80fd19"}, + {file = "duckdb-0.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ff79b2ea9994398b545c0d10601cd73565fbd09f8951b3d8003c7c5c0cebc7cb"}, + {file = "duckdb-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6bdf1aa71b924ef651062e6b8ff9981ad85bec89598294af8a072062c5717340"}, + {file = "duckdb-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0265bbc8216be3ced7b377ba8847128a3fc0ef99798a3c4557c1b88e3a01c23"}, + {file = "duckdb-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d418a315a07707a693bd985274c0f8c4dd77015d9ef5d8d3da4cc1942fd82e0"}, + {file = "duckdb-0.10.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2828475a292e68c71855190b818aded6bce7328f79e38c04a0c75f8f1c0ceef0"}, + {file = "duckdb-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c3aaeaae2eba97035c65f31ffdb18202c951337bf2b3d53d77ce1da8ae2ecf51"}, + {file = 
"duckdb-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:c51790aaaea97d8e4a58a114c371ed8d2c4e1ca7cbf29e3bdab6d8ccfc5afc1e"}, + {file = "duckdb-0.10.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8af1ae7cc77a12206b6c47ade191882cc8f49f750bb3e72bb86ac1d4fa89926a"}, + {file = "duckdb-0.10.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa4f7e8e8dc0e376aeb280b83f2584d0e25ec38985c27d19f3107b2edc4f4a97"}, + {file = "duckdb-0.10.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28ae942a79fad913defa912b56483cd7827a4e7721f4ce4bc9025b746ecb3c89"}, + {file = "duckdb-0.10.0-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:01b57802898091455ca2a32c1335aac1e398da77c99e8a96a1e5de09f6a0add9"}, + {file = "duckdb-0.10.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:52e1ad4a55fa153d320c367046b9500578192e01c6d04308ba8b540441736f2c"}, + {file = "duckdb-0.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:904c47d04095af745e989c853f0bfc0776913dfc40dfbd2da7afdbbb5f67fed0"}, + {file = "duckdb-0.10.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:184ae7ea5874f3b8fa51ab0f1519bdd088a0b78c32080ee272b1d137e2c8fd9c"}, + {file = "duckdb-0.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bd33982ecc9bac727a032d6cedced9f19033cbad56647147408891eb51a6cb37"}, + {file = "duckdb-0.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f59bf0949899105dd5f8864cb48139bfb78454a8c017b8258ba2b5e90acf7afc"}, + {file = "duckdb-0.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:395f3b18948001e35dceb48a4423d574e38656606d033eef375408b539e7b076"}, + {file = "duckdb-0.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b8eb2b803be7ee1df70435c33b03a4598cdaf676cd67ad782b288dcff65d781"}, + {file = "duckdb-0.10.0-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:31b2ddd331801064326c8e3587a4db8a31d02aef11332c168f45b3bd92effb41"}, + 
{file = "duckdb-0.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c8b89e76a041424b8c2026c5dc1f74b53fbbc6c6f650d563259885ab2e7d093d"}, + {file = "duckdb-0.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:79084a82f16c0a54f6bfb7ded5600400c2daa90eb0d83337d81a56924eaee5d4"}, + {file = "duckdb-0.10.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:79799b3a270dcd9070f677ba510f1e66b112df3068425691bac97c5e278929c7"}, + {file = "duckdb-0.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8fc394bfe3434920cdbcfbdd0ac3ba40902faa1dbda088db0ba44003a45318a"}, + {file = "duckdb-0.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c116605551b4abf5786243a59bcef02bd69cc51837d0c57cafaa68cdc428aa0c"}, + {file = "duckdb-0.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3191170c3b0a43b0c12644800326f5afdea00d5a4621d59dbbd0c1059139e140"}, + {file = "duckdb-0.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fee69a50eb93c72dc77e7ab1fabe0c38d21a52c5da44a86aa217081e38f9f1bd"}, + {file = "duckdb-0.10.0-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5f449e87dacb16b0d145dbe65fa6fdb5a55b2b6911a46d74876e445dd395bac"}, + {file = "duckdb-0.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4487d0df221b17ea4177ad08131bc606b35f25cfadf890987833055b9d10cdf6"}, + {file = "duckdb-0.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:c099ae2ff8fe939fda62da81704f91e2f92ac45e48dc0e37c679c9d243d01e65"}, + {file = "duckdb-0.10.0.tar.gz", hash = "sha256:c02bcc128002aa79e3c9d89b9de25e062d1096a8793bc0d7932317b7977f6845"}, ] [[package]] name = "exceptiongroup" version = "1.2.0" description = "Backport of PEP 654 (exception groups)" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -341,11 +392,24 @@ files = [ [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "extra-streamlit-components" +version = "0.1.71" +description = "An all-in-one place, to find complex or just 
natively unavailable components on streamlit." +optional = false +python-versions = ">=3.6" +files = [ + {file = "extra_streamlit_components-0.1.71-py3-none-any.whl", hash = "sha256:c8e6f98446adecd3002756362e50d0669693b7673afaa89cebfced6415cc6bd3"}, + {file = "extra_streamlit_components-0.1.71.tar.gz", hash = "sha256:d18314cf2ed009f95641882b50aa3bdb11b6a0eb6403fb43dbc8af1722419617"}, +] + +[package.dependencies] +streamlit = ">=1.18.0" + [[package]] name = "filelock" version = "3.13.1" description = "A platform independent file lock." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -359,33 +423,127 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pyt typing = ["typing-extensions (>=4.8)"] [[package]] -name = "flask" -version = "3.0.2" -description = "A simple framework for building complex web applications." -category = "main" +name = "fiona" +version = "1.9.6" +description = "Fiona reads and writes spatial data files" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" +files = [ + {file = "fiona-1.9.6-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:63e528b5ea3d8b1038d788e7c65117835c787ba7fdc94b1b42f09c2cbc0aaff2"}, + {file = "fiona-1.9.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:918bd27d8625416672e834593970f96dff63215108f81efb876fe5c0bc58a3b4"}, + {file = "fiona-1.9.6-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:e313210b30d09ed8f829bf625599e248dadd78622728030221f6526580ff26c5"}, + {file = "fiona-1.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:89095c2d542325ee45894b8837e8048cdbb2f22274934e1be3b673ca628010d7"}, + {file = "fiona-1.9.6-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:98cea6f435843b2119731c6b0470e5b7386aa16b6aa7edabbf1ed93aefe029c3"}, + {file = "fiona-1.9.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f4230eccbd896a79d1ebfa551d84bf90f512f7bcbe1ca61e3f82231321f1a532"}, + {file = "fiona-1.9.6-cp311-cp311-manylinux2014_x86_64.whl", hash = 
"sha256:48b6218224e96de5e36b5eb259f37160092260e5de0dcd82ca200b1887aa9884"}, + {file = "fiona-1.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:c1dd5fbc29b7303bb87eb683455e8451e1a53bb8faf20ef97fdcd843c9e4a7f6"}, + {file = "fiona-1.9.6-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:42d8a0e5570948d3821c493b6141866d9a4d7a64edad2be4ecbb89f81904baac"}, + {file = "fiona-1.9.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39819fb8f5ec6d9971cb01b912b4431615a3d3f50c83798565d8ce41917930db"}, + {file = "fiona-1.9.6-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:9b53034efdf93ada9295b081e6a8280af7c75496a20df82d4c2ca46d65b85905"}, + {file = "fiona-1.9.6-cp312-cp312-win_amd64.whl", hash = "sha256:1dcd6eca7524535baf2a39d7981b4a46d33ae28c313934a7c3eae62eecf9dfa5"}, + {file = "fiona-1.9.6-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:e5404ed08c711489abcb3a50a184816825b8af06eb73ad2a99e18b8e7b47c96a"}, + {file = "fiona-1.9.6-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:53bedd2989e255df1bf3378ae9c06d6d241ec273c280c544bb44ffffebb97fb0"}, + {file = "fiona-1.9.6-cp37-cp37m-win_amd64.whl", hash = "sha256:77653a08564a44e634c44cd74a068d2f55d1d4029edd16d1c8aadcc4d8cc1d2c"}, + {file = "fiona-1.9.6-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:e7617563b36d2be99f048f0d0054b4d765f4aae454398f88f19de9c2c324b7f8"}, + {file = "fiona-1.9.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:50037c3b7a5f6f434b562b5b1a5b664f1caa7a4383b00af23cdb59bfc6ba852c"}, + {file = "fiona-1.9.6-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:bf51846ad602757bf27876f458c5c9f14b09421fac612f64273cc4e3fcabc441"}, + {file = "fiona-1.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:11af1afc1255642a7787fe112c29d01f968f1053e4d4700fc6f3bb879c1622e0"}, + {file = "fiona-1.9.6-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:52e8fec650b72fc5253d8f86b63859acc687182281c29bfacd3930496cf982d1"}, + {file = "fiona-1.9.6-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:c9b92aa1badb2773e7cac19bef3064d73e9d80c67c42f0928db2520a04be6f2f"}, + {file = "fiona-1.9.6-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:0eaffbf3bfae9960484c0c08ea461b0c40e111497f04e9475ebf15ac7a22d9dc"}, + {file = "fiona-1.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:f1b49d51a744874608b689f029766aa1e078dd72e94b44cf8eeef6d7bd2e9051"}, + {file = "fiona-1.9.6.tar.gz", hash = "sha256:791b3494f8b218c06ea56f892bd6ba893dfa23525347761d066fb7738acda3b1"}, +] + +[package.dependencies] +attrs = ">=19.2.0" +certifi = "*" +click = ">=8.0,<9.0" +click-plugins = ">=1.0" +cligj = ">=0.5" +six = "*" + +[package.extras] +all = ["fiona[calc,s3,test]"] +calc = ["shapely"] +s3 = ["boto3 (>=1.3.1)"] +test = ["fiona[s3]", "pytest (>=7)", "pytest-cov", "pytz"] + +[[package]] +name = "folium" +version = "0.16.0" +description = "Make beautiful maps with Leaflet.js & Python" +optional = false +python-versions = ">=3.7" files = [ - {file = "flask-3.0.2-py3-none-any.whl", hash = "sha256:3232e0e9c850d781933cf0207523d1ece087eb8d87b23777ae38456e2fbe7c6e"}, - {file = "flask-3.0.2.tar.gz", hash = "sha256:822c03f4b799204250a7ee84b1eddc40665395333973dfb9deebfe425fefcb7d"}, + {file = "folium-0.16.0-py2.py3-none-any.whl", hash = "sha256:ba72505db18bef995c880da19457d2b10c931db8059af5f6ccec9310d262b584"}, + {file = "folium-0.16.0.tar.gz", hash = "sha256:2585ee9253dc758d3a365534caa6fb5fa0c244646db4dc5819afc67bbd4daabb"}, ] [package.dependencies] -blinker = ">=1.6.2" -click = ">=8.1.3" -itsdangerous = ">=2.1.2" -Jinja2 = ">=3.1.2" -Werkzeug = ">=3.0.0" +branca = ">=0.6.0" +jinja2 = ">=2.9" +numpy = "*" +requests = "*" +xyzservices = "*" [package.extras] -async = ["asgiref (>=3.2)"] -dotenv = ["python-dotenv"] +testing = ["pytest"] + +[[package]] +name = "geopandas" +version = "0.14.3" +description = "Geographic pandas extensions" +optional = false +python-versions = ">=3.9" +files = [ + {file = "geopandas-0.14.3-py3-none-any.whl", hash = 
"sha256:41b31ad39e21bc9e8c4254f78f8dc4ce3d33d144e22e630a00bb336c83160204"}, + {file = "geopandas-0.14.3.tar.gz", hash = "sha256:748af035d4a068a4ae00cab384acb61d387685c833b0022e0729aa45216b23ac"}, +] + +[package.dependencies] +fiona = ">=1.8.21" +packaging = "*" +pandas = ">=1.4.0" +pyproj = ">=3.3.0" +shapely = ">=1.8.0" + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.43" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, + {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] +test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] [[package]] name = "identify" version = "2.5.33" description = "File identification library for Python" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -400,7 +558,6 @@ license = ["ukkonen"] name = "idna" version = "3.6" 
description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -408,31 +565,10 @@ files = [ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, ] -[[package]] -name = "importlib-metadata" -version = "7.1.0" -description = "Read metadata from Python packages" -category = "main" -optional = false -python-versions = ">=3.8" -files = [ - {file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"}, - {file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"}, -] - -[package.dependencies] -zipp = ">=0.5" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] - [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -440,23 +576,10 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "itsdangerous" -version = "2.1.2" -description = "Safely pass data to untrusted environments and back." 
-category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"}, - {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"}, -] - [[package]] name = "jinja2" version = "3.1.3" description = "A very fast and expressive template engine." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -470,11 +593,69 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "jsonschema" +version = "4.21.1" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"}, + {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, +] + 
+[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -541,22 +722,20 @@ files = [ ] [[package]] -name = "nest-asyncio" -version = "1.6.0" -description = "Patch asyncio to allow nested event loops" -category = "main" +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" files = [ - {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, - {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] [[package]] name = "nodeenv" version = "1.8.0" description = "Node.js virtual environment builder" -category = "dev" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" files = [ @@ -571,7 +750,6 @@ setuptools = "*" name = "numpy" version = "1.26.4" description = "Fundamental package for array computing in Python" -category = "main" optional = false python-versions = ">=3.9" files = [ @@ -617,7 +795,6 @@ files = [ name = "packaging" version = "23.2" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -627,83 +804,178 @@ files = [ [[package]] name = "pandas" -version = "2.2.1" +version = "2.0.3" description = "Powerful data structures for data analysis, time series, and statistics" -category = "main" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "pandas-2.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8df8612be9cd1c7797c93e1c5df861b2ddda0b48b08f2c3eaa0702cf88fb5f88"}, - {file = 
"pandas-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0f573ab277252ed9aaf38240f3b54cfc90fff8e5cab70411ee1d03f5d51f3944"}, - {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f02a3a6c83df4026e55b63c1f06476c9aa3ed6af3d89b4f04ea656ccdaaaa359"}, - {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c38ce92cb22a4bea4e3929429aa1067a454dcc9c335799af93ba9be21b6beb51"}, - {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c2ce852e1cf2509a69e98358e8458775f89599566ac3775e70419b98615f4b06"}, - {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53680dc9b2519cbf609c62db3ed7c0b499077c7fefda564e330286e619ff0dd9"}, - {file = "pandas-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:94e714a1cca63e4f5939cdce5f29ba8d415d85166be3441165edd427dc9f6bc0"}, - {file = "pandas-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f821213d48f4ab353d20ebc24e4faf94ba40d76680642fb7ce2ea31a3ad94f9b"}, - {file = "pandas-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c70e00c2d894cb230e5c15e4b1e1e6b2b478e09cf27cc593a11ef955b9ecc81a"}, - {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e97fbb5387c69209f134893abc788a6486dbf2f9e511070ca05eed4b930b1b02"}, - {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101d0eb9c5361aa0146f500773395a03839a5e6ecde4d4b6ced88b7e5a1a6403"}, - {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7d2ed41c319c9fb4fd454fe25372028dfa417aacb9790f68171b2e3f06eae8cd"}, - {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af5d3c00557d657c8773ef9ee702c61dd13b9d7426794c9dfeb1dc4a0bf0ebc7"}, - {file = "pandas-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:06cf591dbaefb6da9de8472535b185cba556d0ce2e6ed28e21d919704fef1a9e"}, - {file = 
"pandas-2.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:88ecb5c01bb9ca927ebc4098136038519aa5d66b44671861ffab754cae75102c"}, - {file = "pandas-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:04f6ec3baec203c13e3f8b139fb0f9f86cd8c0b94603ae3ae8ce9a422e9f5bee"}, - {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a935a90a76c44fe170d01e90a3594beef9e9a6220021acfb26053d01426f7dc2"}, - {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c391f594aae2fd9f679d419e9a4d5ba4bce5bb13f6a989195656e7dc4b95c8f0"}, - {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9d1265545f579edf3f8f0cb6f89f234f5e44ba725a34d86535b1a1d38decbccc"}, - {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11940e9e3056576ac3244baef2fedade891977bcc1cb7e5cc8f8cc7d603edc89"}, - {file = "pandas-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acf681325ee1c7f950d058b05a820441075b0dd9a2adf5c4835b9bc056bf4fb"}, - {file = "pandas-2.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9bd8a40f47080825af4317d0340c656744f2bfdb6819f818e6ba3cd24c0e1397"}, - {file = "pandas-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:df0c37ebd19e11d089ceba66eba59a168242fc6b7155cba4ffffa6eccdfb8f16"}, - {file = "pandas-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:739cc70eaf17d57608639e74d63387b0d8594ce02f69e7a0b046f117974b3019"}, - {file = "pandas-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d3558d263073ed95e46f4650becff0c5e1ffe0fc3a015de3c79283dfbdb3df"}, - {file = "pandas-2.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4aa1d8707812a658debf03824016bf5ea0d516afdea29b7dc14cf687bc4d4ec6"}, - {file = "pandas-2.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:76f27a809cda87e07f192f001d11adc2b930e93a2b0c4a236fde5429527423be"}, - {file = "pandas-2.2.1-cp39-cp39-win_amd64.whl", 
hash = "sha256:1ba21b1d5c0e43416218db63037dbe1a01fc101dc6e6024bcad08123e48004ab"}, - {file = "pandas-2.2.1.tar.gz", hash = "sha256:0ab90f87093c13f3e8fa45b48ba9f39181046e8f3317d3aadb2fffbb1b978572"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = 
"pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, ] [package.dependencies] numpy = [ - {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2,<2", 
markers = "python_version == \"3.11\""}, - {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" -tzdata = ">=2022.7" +tzdata = ">=2022.1" [package.extras] -all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] -aws = ["s3fs (>=2022.11.0)"] -clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] -compression = ["zstandard (>=0.19.0)"] -computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] -consortium-standard = ["dataframe-api-compat (>=0.1.7)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] -feather = ["pyarrow (>=10.0.1)"] -fss = ["fsspec (>=2022.11.0)"] -gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] -hdf5 = ["tables (>=3.8.0)"] -html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] -mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] -parquet = 
["pyarrow (>=10.0.1)"] -performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] -plot = ["matplotlib (>=3.6.3)"] -postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] -pyarrow = ["pyarrow (>=10.0.1)"] -spss = ["pyreadstat (>=1.2.0)"] -sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] -test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.9.2)"] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate 
(>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + +[[package]] +name = "patsy" +version = "0.5.6" +description = "A Python package for describing statistical models and for building design matrices." +optional = false +python-versions = "*" +files = [ + {file = "patsy-0.5.6-py2.py3-none-any.whl", hash = "sha256:19056886fd8fa71863fa32f0eb090267f21fb74be00f19f5c70b2e9d76c883c6"}, + {file = "patsy-0.5.6.tar.gz", hash = "sha256:95c6d47a7222535f84bff7f63d7303f2e297747a598db89cf5c67f0c0c7d2cdb"}, +] + +[package.dependencies] +numpy = ">=1.4" +six = "*" + +[package.extras] +test = ["pytest", "pytest-cov", "scipy"] + +[[package]] +name = "pillow" +version = "10.3.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, + {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash 
= "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, + {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, + {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, + {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, 
+ {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, + {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, + {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, + {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, + {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, + {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, + {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, + {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, + {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, + {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, + {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = 
"sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, + {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] [[package]] name = 
"platformdirs" version = "4.1.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -719,7 +991,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co name = "plotly" version = "5.20.0" description = "An open-source, interactive data visualization library for Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -731,11 +1002,29 @@ files = [ packaging = "*" tenacity = ">=6.2.0" +[[package]] +name = "plotly-express" +version = "0.4.1" +description = "Plotly Express - a high level wrapper for Plotly.py" +optional = false +python-versions = "*" +files = [ + {file = "plotly_express-0.4.1-py2.py3-none-any.whl", hash = "sha256:5f112922b0a6225dc7c010e3b86295a74449e3eac6cac8faa95175e99b7698ce"}, + {file = "plotly_express-0.4.1.tar.gz", hash = "sha256:ff73a41ce02fb43d1d8e8fa131ef3e6589857349ca216b941b8f3f862bce0278"}, +] + +[package.dependencies] +numpy = ">=1.11" +pandas = ">=0.20.0" +patsy = ">=0.5" +plotly = ">=4.1.0" +scipy = ">=0.18" +statsmodels = ">=0.9.0" + [[package]] name = "pluggy" version = "1.4.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -751,7 +1040,6 @@ testing = ["pytest", "pytest-benchmark"] name = "pre-commit" version = "2.21.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -766,11 +1054,168 @@ nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" +[[package]] +name = "protobuf" +version = "4.25.3" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, + {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, + {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, + {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, + {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, + {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, + {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, + {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, + {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, +] + +[[package]] +name = "pyarrow" +version = "15.0.2" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"pyarrow-15.0.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:88b340f0a1d05b5ccc3d2d986279045655b1fe8e41aba6ca44ea28da0d1455d8"}, + {file = "pyarrow-15.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eaa8f96cecf32da508e6c7f69bb8401f03745c050c1dd42ec2596f2e98deecac"}, + {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23c6753ed4f6adb8461e7c383e418391b8d8453c5d67e17f416c3a5d5709afbd"}, + {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f639c059035011db8c0497e541a8a45d98a58dbe34dc8fadd0ef128f2cee46e5"}, + {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:290e36a59a0993e9a5224ed2fb3e53375770f07379a0ea03ee2fce2e6d30b423"}, + {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:06c2bb2a98bc792f040bef31ad3e9be6a63d0cb39189227c08a7d955db96816e"}, + {file = "pyarrow-15.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:f7a197f3670606a960ddc12adbe8075cea5f707ad7bf0dffa09637fdbb89f76c"}, + {file = "pyarrow-15.0.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5f8bc839ea36b1f99984c78e06e7a06054693dc2af8920f6fb416b5bca9944e4"}, + {file = "pyarrow-15.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f5e81dfb4e519baa6b4c80410421528c214427e77ca0ea9461eb4097c328fa33"}, + {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a4f240852b302a7af4646c8bfe9950c4691a419847001178662a98915fd7ee7"}, + {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e7d9cfb5a1e648e172428c7a42b744610956f3b70f524aa3a6c02a448ba853e"}, + {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2d4f905209de70c0eb5b2de6763104d5a9a37430f137678edfb9a675bac9cd98"}, + {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90adb99e8ce5f36fbecbbc422e7dcbcbed07d985eed6062e459e23f9e71fd197"}, + 
{file = "pyarrow-15.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:b116e7fd7889294cbd24eb90cd9bdd3850be3738d61297855a71ac3b8124ee38"}, + {file = "pyarrow-15.0.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:25335e6f1f07fdaa026a61c758ee7d19ce824a866b27bba744348fa73bb5a440"}, + {file = "pyarrow-15.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:90f19e976d9c3d8e73c80be84ddbe2f830b6304e4c576349d9360e335cd627fc"}, + {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a22366249bf5fd40ddacc4f03cd3160f2d7c247692945afb1899bab8a140ddfb"}, + {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2a335198f886b07e4b5ea16d08ee06557e07db54a8400cc0d03c7f6a22f785f"}, + {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e6d459c0c22f0b9c810a3917a1de3ee704b021a5fb8b3bacf968eece6df098f"}, + {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:033b7cad32198754d93465dcfb71d0ba7cb7cd5c9afd7052cab7214676eec38b"}, + {file = "pyarrow-15.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:29850d050379d6e8b5a693098f4de7fd6a2bea4365bfd073d7c57c57b95041ee"}, + {file = "pyarrow-15.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:7167107d7fb6dcadb375b4b691b7e316f4368f39f6f45405a05535d7ad5e5058"}, + {file = "pyarrow-15.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e85241b44cc3d365ef950432a1b3bd44ac54626f37b2e3a0cc89c20e45dfd8bf"}, + {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:248723e4ed3255fcd73edcecc209744d58a9ca852e4cf3d2577811b6d4b59818"}, + {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ff3bdfe6f1b81ca5b73b70a8d482d37a766433823e0c21e22d1d7dde76ca33f"}, + {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:f3d77463dee7e9f284ef42d341689b459a63ff2e75cee2b9302058d0d98fe142"}, + {file = 
"pyarrow-15.0.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:8c1faf2482fb89766e79745670cbca04e7018497d85be9242d5350cba21357e1"}, + {file = "pyarrow-15.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:28f3016958a8e45a1069303a4a4f6a7d4910643fc08adb1e2e4a7ff056272ad3"}, + {file = "pyarrow-15.0.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:89722cb64286ab3d4daf168386f6968c126057b8c7ec3ef96302e81d8cdb8ae4"}, + {file = "pyarrow-15.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cd0ba387705044b3ac77b1b317165c0498299b08261d8122c96051024f953cd5"}, + {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad2459bf1f22b6a5cdcc27ebfd99307d5526b62d217b984b9f5c974651398832"}, + {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58922e4bfece8b02abf7159f1f53a8f4d9f8e08f2d988109126c17c3bb261f22"}, + {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:adccc81d3dc0478ea0b498807b39a8d41628fa9210729b2f718b78cb997c7c91"}, + {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8bd2baa5fe531571847983f36a30ddbf65261ef23e496862ece83bdceb70420d"}, + {file = "pyarrow-15.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6669799a1d4ca9da9c7e06ef48368320f5856f36f9a4dd31a11839dda3f6cc8c"}, + {file = "pyarrow-15.0.2.tar.gz", hash = "sha256:9c9bc803cb3b7bfacc1e96ffbfd923601065d9d3f911179d81e72d99fd74a3d9"}, +] + +[package.dependencies] +numpy = ">=1.16.6,<2" + +[[package]] +name = "pydeck" +version = "0.8.0" +description = "Widget for deck.gl maps" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydeck-0.8.0-py2.py3-none-any.whl", hash = "sha256:a8fa7757c6f24bba033af39db3147cb020eef44012ba7e60d954de187f9ed4d5"}, + {file = "pydeck-0.8.0.tar.gz", hash = "sha256:07edde833f7cfcef6749124351195aa7dcd24663d4909fd7898dbd0b6fbc01ec"}, +] + +[package.dependencies] +jinja2 = ">=2.10.1" +numpy = ">=1.16.4" + +[package.extras] +carto = 
["pydeck-carto"] +jupyter = ["ipykernel (>=5.1.2)", "ipython (>=5.8.0)", "ipywidgets (>=7,<8)", "traitlets (>=4.3.2)"] + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pyjwt" +version = "2.8.0" +description = "JSON Web Token implementation in Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, + {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, +] + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pyproj" +version = "3.6.1" +description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pyproj-3.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab7aa4d9ff3c3acf60d4b285ccec134167a948df02347585fdd934ebad8811b4"}, + {file = "pyproj-3.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4bc0472302919e59114aa140fd7213c2370d848a7249d09704f10f5b062031fe"}, + {file = 
"pyproj-3.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5279586013b8d6582e22b6f9e30c49796966770389a9d5b85e25a4223286cd3f"}, + {file = "pyproj-3.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fafd1f3eb421694857f254a9bdbacd1eb22fc6c24ca74b136679f376f97d35"}, + {file = "pyproj-3.6.1-cp310-cp310-win32.whl", hash = "sha256:c41e80ddee130450dcb8829af7118f1ab69eaf8169c4bf0ee8d52b72f098dc2f"}, + {file = "pyproj-3.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:db3aedd458e7f7f21d8176f0a1d924f1ae06d725228302b872885a1c34f3119e"}, + {file = "pyproj-3.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ebfbdbd0936e178091309f6cd4fcb4decd9eab12aa513cdd9add89efa3ec2882"}, + {file = "pyproj-3.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:447db19c7efad70ff161e5e46a54ab9cc2399acebb656b6ccf63e4bc4a04b97a"}, + {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7e13c40183884ec7f94eb8e0f622f08f1d5716150b8d7a134de48c6110fee85"}, + {file = "pyproj-3.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65ad699e0c830e2b8565afe42bd58cc972b47d829b2e0e48ad9638386d994915"}, + {file = "pyproj-3.6.1-cp311-cp311-win32.whl", hash = "sha256:8b8acc31fb8702c54625f4d5a2a6543557bec3c28a0ef638778b7ab1d1772132"}, + {file = "pyproj-3.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:38a3361941eb72b82bd9a18f60c78b0df8408416f9340521df442cebfc4306e2"}, + {file = "pyproj-3.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1e9fbaf920f0f9b4ee62aab832be3ae3968f33f24e2e3f7fbb8c6728ef1d9746"}, + {file = "pyproj-3.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d227a865356f225591b6732430b1d1781e946893789a609bb34f59d09b8b0f8"}, + {file = "pyproj-3.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83039e5ae04e5afc974f7d25ee0870a80a6bd6b7957c3aca5613ccbe0d3e72bf"}, + {file = 
"pyproj-3.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb059ba3bced6f6725961ba758649261d85ed6ce670d3e3b0a26e81cf1aa8d"}, + {file = "pyproj-3.6.1-cp312-cp312-win32.whl", hash = "sha256:2d6ff73cc6dbbce3766b6c0bce70ce070193105d8de17aa2470009463682a8eb"}, + {file = "pyproj-3.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:7a27151ddad8e1439ba70c9b4b2b617b290c39395fa9ddb7411ebb0eb86d6fb0"}, + {file = "pyproj-3.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ba1f9b03d04d8cab24d6375609070580a26ce76eaed54631f03bab00a9c737b"}, + {file = "pyproj-3.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18faa54a3ca475bfe6255156f2f2874e9a1c8917b0004eee9f664b86ccc513d3"}, + {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd43bd9a9b9239805f406fd82ba6b106bf4838d9ef37c167d3ed70383943ade1"}, + {file = "pyproj-3.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50100b2726a3ca946906cbaa789dd0749f213abf0cbb877e6de72ca7aa50e1ae"}, + {file = "pyproj-3.6.1-cp39-cp39-win32.whl", hash = "sha256:9274880263256f6292ff644ca92c46d96aa7e57a75c6df3f11d636ce845a1877"}, + {file = "pyproj-3.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:36b64c2cb6ea1cc091f329c5bd34f9c01bb5da8c8e4492c709bda6a09f96808f"}, + {file = "pyproj-3.6.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd93c1a0c6c4aedc77c0fe275a9f2aba4d59b8acf88cebfc19fe3c430cfabf4f"}, + {file = "pyproj-3.6.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6420ea8e7d2a88cb148b124429fba8cd2e0fae700a2d96eab7083c0928a85110"}, + {file = "pyproj-3.6.1.tar.gz", hash = "sha256:44aa7c704c2b7d8fb3d483bbf75af6cb2350d30a63b144279a09b75fead501bf"}, +] + +[package.dependencies] +certifi = "*" + [[package]] name = "pyproject-api" version = "1.6.1" description = "API to interact with the python pyproject.toml based projects" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -790,7 
+1235,6 @@ testing = ["covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytes name = "pytest" version = "7.4.4" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -813,7 +1257,6 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -828,7 +1271,6 @@ six = ">=1.5" name = "pytz" version = "2024.1" description = "World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" files = [ @@ -840,7 +1282,6 @@ files = [ name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -897,11 +1338,25 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "referencing" +version = "0.34.0" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.34.0-py3-none-any.whl", hash = "sha256:d53ae300ceddd3169f1ffa9caf2cb7b769e92657e4fafb23d34b93679116dfd4"}, + {file = "referencing-0.34.0.tar.gz", hash = "sha256:5773bd84ef41799a5a8ca72dc34590c041eb01bf9aa02632b4a973fb0181a844"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + [[package]] name = "requests" version = "2.31.0" description = "Python HTTP for Humans." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -920,25 +1375,177 @@ socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] -name = "retrying" -version = "1.3.4" -description = "Retrying" -category = "main" +name = "rich" +version = "13.7.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false -python-versions = "*" +python-versions = ">=3.7.0" files = [ - {file = "retrying-1.3.4-py3-none-any.whl", hash = "sha256:8cc4d43cb8e1125e0ff3344e9de678fefd85db3b750b81b2240dc0183af37b35"}, - {file = "retrying-1.3.4.tar.gz", hash = "sha256:345da8c5765bd982b1d1915deb9102fd3d1f7ad16bd84a9700b85f64d24e8f3e"}, + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, ] [package.dependencies] -six = ">=1.7.0" +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rpds-py" +version = "0.18.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e"}, + {file = "rpds_py-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51"}, + {file = 
"rpds_py-0.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88"}, + {file = "rpds_py-0.18.0-cp310-none-win32.whl", hash = "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337"}, + {file = "rpds_py-0.18.0-cp310-none-win_amd64.whl", hash = "sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836"}, + {file = "rpds_py-0.18.0-cp311-none-win32.whl", hash = "sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1"}, + {file = "rpds_py-0.18.0-cp311-none-win_amd64.whl", hash = "sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475"}, + {file = 
"rpds_py-0.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7"}, + {file = "rpds_py-0.18.0-cp312-none-win32.whl", hash = "sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98"}, + {file = "rpds_py-0.18.0-cp312-none-win_amd64.whl", hash = "sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594"}, + {file = "rpds_py-0.18.0-cp38-none-win32.whl", hash = "sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e"}, + {file = "rpds_py-0.18.0-cp38-none-win_amd64.whl", hash = "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20"}, + {file = "rpds_py-0.18.0-cp39-none-win32.whl", hash = "sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7"}, + {file = "rpds_py-0.18.0-cp39-none-win_amd64.whl", hash = "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e"}, + {file = 
"rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd"}, + {file = 
"rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76"}, + {file = 
"rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f"}, + {file = "rpds_py-0.18.0.tar.gz", hash = "sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d"}, +] + +[[package]] +name = "scipy" +version = "1.13.0" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scipy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba419578ab343a4e0a77c0ef82f088238a93eef141b2b8017e46149776dfad4d"}, + {file = "scipy-1.13.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:22789b56a999265431c417d462e5b7f2b487e831ca7bef5edeb56efe4c93f86e"}, + {file = "scipy-1.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f1432ba070e90d42d7fd836462c50bf98bd08bed0aa616c359eed8a04e3922"}, + {file = "scipy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b8434f6f3fa49f631fae84afee424e2483289dfc30a47755b4b4e6b07b2633a4"}, + {file = "scipy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:dcbb9ea49b0167de4167c40eeee6e167caeef11effb0670b554d10b1e693a8b9"}, + {file = "scipy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:1d2f7bb14c178f8b13ebae93f67e42b0a6b0fc50eba1cd8021c9b6e08e8fb1cd"}, + {file = "scipy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fbcf8abaf5aa2dc8d6400566c1a727aed338b5fe880cde64907596a89d576fa"}, + {file = "scipy-1.13.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5e4a756355522eb60fcd61f8372ac2549073c8788f6114449b37e9e8104f15a5"}, + {file = "scipy-1.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5acd8e1dbd8dbe38d0004b1497019b2dbbc3d70691e65d69615f8a7292865d7"}, + {file = "scipy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ff7dad5d24a8045d836671e082a490848e8639cabb3dbdacb29f943a678683d"}, + {file = "scipy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4dca18c3ffee287ddd3bc8f1dabaf45f5305c5afc9f8ab9cbfab855e70b2df5c"}, + {file = "scipy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:a2f471de4d01200718b2b8927f7d76b5d9bde18047ea0fa8bd15c5ba3f26a1d6"}, + {file = "scipy-1.13.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0de696f589681c2802f9090fff730c218f7c51ff49bf252b6a97ec4a5d19e8b"}, + {file = "scipy-1.13.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:b2a3ff461ec4756b7e8e42e1c681077349a038f0686132d623fa404c0bee2551"}, + {file = "scipy-1.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf9fe63e7a4bf01d3645b13ff2aa6dea023d38993f42aaac81a18b1bda7a82a"}, + {file = "scipy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e7626dfd91cdea5714f343ce1176b6c4745155d234f1033584154f60ef1ff42"}, + {file = "scipy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:109d391d720fcebf2fbe008621952b08e52907cf4c8c7efc7376822151820820"}, + {file = "scipy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:8930ae3ea371d6b91c203b1032b9600d69c568e537b7988a3073dfe4d4774f21"}, + {file = "scipy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5407708195cb38d70fd2d6bb04b1b9dd5c92297d86e9f9daae1576bd9e06f602"}, + {file = "scipy-1.13.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:ac38c4c92951ac0f729c4c48c9e13eb3675d9986cc0c83943784d7390d540c78"}, + {file = "scipy-1.13.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c74543c4fbeb67af6ce457f6a6a28e5d3739a87f62412e4a16e46f164f0ae5"}, + {file = "scipy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28e286bf9ac422d6beb559bc61312c348ca9b0f0dae0d7c5afde7f722d6ea13d"}, + {file = "scipy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33fde20efc380bd23a78a4d26d59fc8704e9b5fd9b08841693eb46716ba13d86"}, + {file = "scipy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:45c08bec71d3546d606989ba6e7daa6f0992918171e2a6f7fbedfa7361c2de1e"}, + {file = "scipy-1.13.0.tar.gz", hash = "sha256:58569af537ea29d3f78e5abd18398459f195546bb3be23d16677fb26616cc11e"}, +] + +[package.dependencies] +numpy = ">=1.22.4,<2.3" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] +test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "setuptools" version = "69.0.3" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" 
optional = false python-versions = ">=3.8" files = [ @@ -951,11 +1558,67 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +[[package]] +name = "shapely" +version = "2.0.3" +description = "Manipulation and analysis of geometric objects" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:af7e9abe180b189431b0f490638281b43b84a33a960620e6b2e8d3e3458b61a1"}, + {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98040462b36ced9671e266b95c326b97f41290d9d17504a1ee4dc313a7667b9c"}, + {file = "shapely-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:71eb736ef2843f23473c6e37f6180f90f0a35d740ab284321548edf4e55d9a52"}, + {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:881eb9dbbb4a6419667e91fcb20313bfc1e67f53dbb392c6840ff04793571ed1"}, + {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f10d2ccf0554fc0e39fad5886c839e47e207f99fdf09547bc687a2330efda35b"}, + {file = "shapely-2.0.3-cp310-cp310-win32.whl", hash = "sha256:6dfdc077a6fcaf74d3eab23a1ace5abc50c8bce56ac7747d25eab582c5a2990e"}, + {file = "shapely-2.0.3-cp310-cp310-win_amd64.whl", hash = 
"sha256:64c5013dacd2d81b3bb12672098a0b2795c1bf8190cfc2980e380f5ef9d9e4d9"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56cee3e4e8159d6f2ce32e421445b8e23154fd02a0ac271d6a6c0b266a8e3cce"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:619232c8276fded09527d2a9fd91a7885ff95c0ff9ecd5e3cb1e34fbb676e2ae"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2a7d256db6f5b4b407dc0c98dd1b2fcf1c9c5814af9416e5498d0a2e4307a4b"}, + {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45f0c8cd4583647db3216d965d49363e6548c300c23fd7e57ce17a03f824034"}, + {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13cb37d3826972a82748a450328fe02a931dcaed10e69a4d83cc20ba021bc85f"}, + {file = "shapely-2.0.3-cp311-cp311-win32.whl", hash = "sha256:9302d7011e3e376d25acd30d2d9e70d315d93f03cc748784af19b00988fc30b1"}, + {file = "shapely-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6b464f2666b13902835f201f50e835f2f153f37741db88f68c7f3b932d3505fa"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e86e7cb8e331a4850e0c2a8b2d66dc08d7a7b301b8d1d34a13060e3a5b4b3b55"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c91981c99ade980fc49e41a544629751a0ccd769f39794ae913e53b07b2f78b9"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd45d456983dc60a42c4db437496d3f08a4201fbf662b69779f535eb969660af"}, + {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:882fb1ffc7577e88c1194f4f1757e277dc484ba096a3b94844319873d14b0f2d"}, + {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9f2d93bff2ea52fa93245798cddb479766a18510ea9b93a4fb9755c79474889"}, + {file = "shapely-2.0.3-cp312-cp312-win32.whl", hash = 
"sha256:99abad1fd1303b35d991703432c9481e3242b7b3a393c186cfb02373bf604004"}, + {file = "shapely-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:6f555fe3304a1f40398977789bc4fe3c28a11173196df9ece1e15c5bc75a48db"}, + {file = "shapely-2.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a983cc418c1fa160b7d797cfef0e0c9f8c6d5871e83eae2c5793fce6a837fad9"}, + {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18bddb8c327f392189a8d5d6b9a858945722d0bb95ccbd6a077b8e8fc4c7890d"}, + {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:442f4dcf1eb58c5a4e3428d88e988ae153f97ab69a9f24e07bf4af8038536325"}, + {file = "shapely-2.0.3-cp37-cp37m-win32.whl", hash = "sha256:31a40b6e3ab00a4fd3a1d44efb2482278642572b8e0451abdc8e0634b787173e"}, + {file = "shapely-2.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:59b16976c2473fec85ce65cc9239bef97d4205ab3acead4e6cdcc72aee535679"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:705efbce1950a31a55b1daa9c6ae1c34f1296de71ca8427974ec2f27d57554e3"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:601c5c0058a6192df704cb889439f64994708563f57f99574798721e9777a44b"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f24ecbb90a45c962b3b60d8d9a387272ed50dc010bfe605f1d16dfc94772d8a1"}, + {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c2a2989222c6062f7a0656e16276c01bb308bc7e5d999e54bf4e294ce62e76"}, + {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42bceb9bceb3710a774ce04908fda0f28b291323da2688f928b3f213373b5aee"}, + {file = "shapely-2.0.3-cp38-cp38-win32.whl", hash = "sha256:54d925c9a311e4d109ec25f6a54a8bd92cc03481a34ae1a6a92c1fe6729b7e01"}, + {file = "shapely-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:300d203b480a4589adefff4c4af0b13919cd6d760ba3cbb1e56275210f96f654"}, + {file 
= "shapely-2.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:083d026e97b6c1f4a9bd2a9171c7692461092ed5375218170d91705550eecfd5"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:27b6e1910094d93e9627f2664121e0e35613262fc037051680a08270f6058daf"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:71b2de56a9e8c0e5920ae5ddb23b923490557ac50cb0b7fa752761bf4851acde"}, + {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d279e56bbb68d218d63f3efc80c819cedcceef0e64efbf058a1df89dc57201b"}, + {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88566d01a30f0453f7d038db46bc83ce125e38e47c5f6bfd4c9c287010e9bf74"}, + {file = "shapely-2.0.3-cp39-cp39-win32.whl", hash = "sha256:58afbba12c42c6ed44c4270bc0e22f3dadff5656d711b0ad335c315e02d04707"}, + {file = "shapely-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:5026b30433a70911979d390009261b8c4021ff87c7c3cbd825e62bb2ffa181bc"}, + {file = "shapely-2.0.3.tar.gz", hash = "sha256:4d65d0aa7910af71efa72fd6447e02a8e5dd44da81a983de9d736d6e6ccbe674"}, +] + +[package.dependencies] +numpy = ">=1.14,<2" + +[package.extras] +docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +test = ["pytest", "pytest-cov"] + [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -963,11 +1626,141 @@ files = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = 
"sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "st-pages" +version = "0.4.5" +description = "An experimental version of Streamlit Multi-Page Apps" +optional = false +python-versions = ">=3.8, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*" +files = [ + {file = "st_pages-0.4.5-py3-none-any.whl", hash = "sha256:7cc0c9137bc2a3aba2c7918f76a9b220673b4344762544ebf5c5e56d16a6f360"}, + {file = "st_pages-0.4.5.tar.gz", hash = "sha256:0b95b2ae53e91f9922f2f254b356e1063981b5fcc89a48c4b89011806ccda465"}, +] + +[package.dependencies] +streamlit = ">=1.10.0" + +[[package]] +name = "statsmodels" +version = "0.14.1" +description = "Statistical computations and models for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "statsmodels-0.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43af9c0b07c9d72f275cf14ea54a481a3f20911f0b443181be4769def258fdeb"}, + {file = "statsmodels-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16975ab6ad505d837ba9aee11f92a8c5b49c4fa1ff45b60fe23780b19e5705e"}, + {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e278fe74da5ed5e06c11a30851eda1af08ef5af6be8507c2c45d2e08f7550dde"}, + {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0564d92cb05b219b4538ed09e77d96658a924a691255e1f7dd23ee338df441b"}, + {file = "statsmodels-0.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5385e22e72159a09c099c4fb975f350a9f3afeb57c1efce273b89dcf1fe44c0f"}, + {file = "statsmodels-0.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:0a8aae75a2e08ebd990e5fa394f8e32738b55785cb70798449a3f4207085e667"}, + {file = "statsmodels-0.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:b69a63ad6c979a6e4cde11870ffa727c76a318c225a7e509f031fbbdfb4e416a"}, + {file = "statsmodels-0.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7562cb18a90a114f39fab6f1c25b9c7b39d9cd5f433d0044b430ca9d44a8b52c"}, + {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3abaca4b963259a2bf349c7609cfbb0ce64ad5fb3d92d6f08e21453e4890248"}, + {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f727fe697f6406d5f677b67211abe5a55101896abdfacdb3f38410405f6ad8"}, + {file = "statsmodels-0.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6838ac6bdb286daabb5e91af90fd4258f09d0cec9aace78cc441cb2b17df428"}, + {file = "statsmodels-0.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:709bfcef2dbe66f705b17e56d1021abad02243ee1a5d1efdb90f9bad8b06a329"}, + {file = "statsmodels-0.14.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f32a7cd424cf33304a54daee39d32cccf1d0265e652c920adeaeedff6d576457"}, + {file = "statsmodels-0.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f8c30181c084173d662aaf0531867667be2ff1bee103b84feb64f149f792dbd2"}, + {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de2b97413913d52ad6342dece2d653e77f78620013b7705fad291d4e4266ccb"}, + {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3420f88289c593ba2bca33619023059c476674c160733bd7d858564787c83d3"}, + {file = "statsmodels-0.14.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c008e16096f24f0514e53907890ccac6589a16ad6c81c218f2ee6752fdada555"}, + {file = "statsmodels-0.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:bc0351d279c4e080f0ce638a3d886d312aa29eade96042e3ba0a73771b1abdfb"}, + {file = "statsmodels-0.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf293ada63b2859d95210165ad1dfcd97bd7b994a5266d6fbeb23659d8f0bf68"}, + {file = 
"statsmodels-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44ca8cb88fa3d3a4ffaff1fb8eb0e98bbf83fc936fcd9b9eedee258ecc76696a"}, + {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d5373d176239993c095b00d06036690a50309a4e00c2da553b65b840f956ae6"}, + {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532dfe899f8b6632cd8caa0b089b403415618f51e840d1817a1e4b97e200c73"}, + {file = "statsmodels-0.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:4fe0a60695952b82139ae8750952786a700292f9e0551d572d7685070944487b"}, + {file = "statsmodels-0.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04293890f153ffe577e60a227bd43babd5f6c1fc50ea56a3ab1862ae85247a95"}, + {file = "statsmodels-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e70a2e93d54d40b2cb6426072acbc04f35501b1ea2569f6786964adde6ca572"}, + {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab3a73d16c0569adbba181ebb967e5baaa74935f6d2efe86ac6fc5857449b07d"}, + {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eefa5bcff335440ee93e28745eab63559a20cd34eea0375c66d96b016de909b3"}, + {file = "statsmodels-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:bc43765710099ca6a942b5ffa1bac7668965052542ba793dd072d26c83453572"}, + {file = "statsmodels-0.14.1.tar.gz", hash = "sha256:2260efdc1ef89f39c670a0bd8151b1d0843567781bcafec6cda0534eb47a94f6"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.18,<2", markers = "python_version != \"3.10\" or platform_system != \"Windows\" or platform_python_implementation == \"PyPy\""}, + {version = ">=1.22.3,<2", markers = "python_version == \"3.10\" and platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""}, +] +packaging = ">=21.3" +pandas = ">=1.0,<2.1.0 || >2.1.0" +patsy = ">=0.5.4" +scipy = ">=1.4,<1.9.2 || >1.9.2" + +[package.extras] 
+build = ["cython (>=0.29.33)"] +develop = ["colorama", "cython (>=0.29.33)", "cython (>=0.29.33,<4.0.0)", "flake8", "isort", "joblib", "matplotlib (>=3)", "oldest-supported-numpy (>=2022.4.18)", "pytest (>=7.3.0)", "pytest-cov", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=8.0,<9.0)"] +docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"] + +[[package]] +name = "streamlit" +version = "1.32.2" +description = "A faster way to build and share data apps" +optional = false +python-versions = ">=3.8, !=3.9.7" +files = [ + {file = "streamlit-1.32.2-py2.py3-none-any.whl", hash = "sha256:a0b8044e76fec364b07be145f8b40dbd8d083e20ebbb189ceb1fa9423f3dedea"}, + {file = "streamlit-1.32.2.tar.gz", hash = "sha256:1258b9cbc3ff957bf7d09b1bfc85cedc308f1065b30748545295a9af8d5577ab"}, +] + +[package.dependencies] +altair = ">=4.0,<6" +blinker = ">=1.0.0,<2" +cachetools = ">=4.0,<6" +click = ">=7.0,<9" +gitpython = ">=3.0.7,<3.1.19 || >3.1.19,<4" +numpy = ">=1.19.3,<2" +packaging = ">=16.8,<24" +pandas = ">=1.3.0,<3" +pillow = ">=7.1.0,<11" +protobuf = ">=3.20,<5" +pyarrow = ">=7.0" +pydeck = ">=0.8.0b4,<1" +requests = ">=2.27,<3" +rich = ">=10.14.0,<14" +tenacity = ">=8.1.0,<9" +toml = ">=0.10.1,<2" +tornado = ">=6.0.3,<7" +typing-extensions = ">=4.3.0,<5" +watchdog = {version = ">=2.1.5", markers = "platform_system != \"Darwin\""} + +[package.extras] +snowflake = ["snowflake-connector-python (>=2.8.0)", "snowflake-snowpark-python (>=0.9.0)"] + +[[package]] +name = "streamlit-authenticator" +version = "0.3.2" +description = "A secure authentication module to validate user credentials in a Streamlit application." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "streamlit-authenticator-0.3.2.tar.gz", hash = "sha256:f17a77d0394a45d6554a72d890cc270d9be5328eeb12958898bf0183e7321ed6"}, + {file = "streamlit_authenticator-0.3.2-py3-none-any.whl", hash = "sha256:0620768d01aa6c7bff4200f062effff333a8e0cfde9b5300a67ba878c51b0adc"}, +] + +[package.dependencies] +bcrypt = ">=3.1.7" +extra-streamlit-components = ">=0.1.70" +PyJWT = ">=2.3.0" +PyYAML = ">=5.3.1" +streamlit = ">=1.25.0" + [[package]] name = "tenacity" version = "8.2.3" description = "Retry code until it succeeds" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -978,11 +1771,21 @@ files = [ [package.extras] doc = ["reno", "sphinx", "tornado (>=4.5)"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -990,11 +1793,41 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "toolz" +version = "0.12.1" +description = "List processing tools and functional utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "toolz-0.12.1-py3-none-any.whl", hash = "sha256:d22731364c07d72eea0a0ad45bafb2c2937ab6fd38a3507bf55eae8744aa7d85"}, + {file = "toolz-0.12.1.tar.gz", hash = "sha256:ecca342664893f177a13dac0e6b41cbd8ac25a358e5f215316d43e2100224f4d"}, +] + +[[package]] +name = "tornado" +version = "6.4" +description = "Tornado is a Python web 
framework and asynchronous networking library, originally developed at FriendFeed." +optional = false +python-versions = ">= 3.8" +files = [ + {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"}, + {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"}, + {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"}, + {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"}, + {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"}, +] + [[package]] name = "tox" version = "4.12.1" description = "tox is a generic virtualenv management and test command line tool" -category = "dev" optional = false python-versions = ">=3.8" files = [ 
@@ -1022,7 +1855,6 @@ testing = ["build[virtualenv] (>=1.0.3)", "covdefaults (>=2.3)", "detect-test-po name = "typing-extensions" version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1034,7 +1866,6 @@ files = [ name = "tzdata" version = "2024.1" description = "Provider of IANA time zone data" -category = "main" optional = false python-versions = ">=2" files = [ @@ -1046,7 +1877,6 @@ files = [ name = "urllib3" version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1064,7 +1894,6 @@ zstd = ["zstandard (>=0.18.0)"] name = "virtualenv" version = "20.25.0" description = "Virtual Python Environment builder" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1082,40 +1911,58 @@ docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx- test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] [[package]] -name = "werkzeug" -version = "3.0.1" -description = "The comprehensive WSGI web application library." 
-category = "main" +name = "watchdog" +version = "4.0.0" +description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"}, - {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, + {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, + {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, + {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, + {file = 
"watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, + {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, + {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, + {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, + {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, ] -[package.dependencies] -MarkupSafe = ">=2.1.1" - [package.extras] -watchdog = ["watchdog (>=2.3)"] +watchmedo = ["PyYAML (>=3.10)"] [[package]] -name = "zipp" -version = "3.18.1" -description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" +name = "xyzservices" +version = "2024.4.0" +description = "Source of XYZ tiles providers" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"}, - {file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"}, + {file = "xyzservices-2024.4.0-py3-none-any.whl", hash = "sha256:b83e48c5b776c9969fffcfff57b03d02b1b1cd6607a9d9c4e7f568b01ef47f4c"}, + {file = "xyzservices-2024.4.0.tar.gz", hash = "sha256:6a04f11487a6fb77d92a98984cd107fbd9157fd5e65f929add9c3d6e604ee88c"}, ] -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] - [metadata] lock-version = "2.0" python-versions = 
"^3.10" -content-hash = "a604d3b769ffc5079bf789d1557a112f77a5fbf91071732ab27de41caf356da8" +content-hash = "48851ceb7f8426b10ba5d87ce97d4cb7ee8cf455c61de64b1db4534b48435a2f" diff --git a/pyproject.toml b/pyproject.toml index 3476549..30f37cb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,13 +17,13 @@ python = "^3.10" # jupyter = "^1.0.0" # ipykernel = "^5.3.4" pandas = "2.0.3" -dash = "^2.16.1" duckdb = "0.10.0" geopandas = "^0.14.3" folium = "^0.16.0" streamlit = "^1.32.2" plotly-express = "^0.4.1" -streamlit-dynamic-filters = "^0.1.6" +streamlit-authenticator = "^0.3.2" +st-pages = "^0.4.5" [tool.poetry.group.dev.dependencies] pre-commit = "^2.20.0" From 76a2c387d0f1685f7fb9fe9f62e409f1d9f8e992 Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 26 Jul 2024 09:37:28 -0400 Subject: [PATCH 145/147] =?UTF-8?q?[kb]=20=F0=9F=90=9B=20Fix=20fr=5FFR=20l?= =?UTF-8?q?ocale=20#31?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/Dockerfile-dev | 3 +++ dashboards/app/pages/data.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/dashboards/Dockerfile-dev b/dashboards/Dockerfile-dev index 623dc9d..d3fcc38 100644 --- a/dashboards/Dockerfile-dev +++ b/dashboards/Dockerfile-dev @@ -8,6 +8,9 @@ RUN apt-get update RUN mkdir zds WORKDIR zds +RUN apt-get install -y locales +RUN echo "fr_FR.UTF-8 UTF-8" | tee -a /etc/locale.gen && locale-gen fr_FR.UTF-8 + COPY app/requirements.txt . 
RUN pip install -r requirements.txt diff --git a/dashboards/app/pages/data.py b/dashboards/app/pages/data.py index 3779ace..3e65cbb 100644 --- a/dashboards/app/pages/data.py +++ b/dashboards/app/pages/data.py @@ -34,7 +34,7 @@ ) # Définir les paramètres linguistiques FR pour l'affichage des nombres -locale.setlocale(locale.LC_NUMERIC, "fr_FR") +locale.setlocale(locale.LC_NUMERIC, "fr_FR.utf8") # Fonction pour améliorer l'affichage des nombres (milliers, millions, milliards) def french_format(x: int) -> str: From 09aca6ebd93c860b88d705f23acc0511bd8f5a4a Mon Sep 17 00:00:00 2001 From: KyllianBeguin Date: Fri, 26 Jul 2024 09:38:59 -0400 Subject: [PATCH 146/147] =?UTF-8?q?[kb]=20=F0=9F=90=9B=20Fix=20fr=5FFR=20l?= =?UTF-8?q?ocale=20#31?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboards/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dashboards/Dockerfile b/dashboards/Dockerfile index 5d9610b..9cc9ad3 100644 --- a/dashboards/Dockerfile +++ b/dashboards/Dockerfile @@ -8,6 +8,9 @@ RUN apt-get update RUN mkdir zds WORKDIR zds +RUN apt-get install -y locales +RUN echo "fr_FR.UTF-8 UTF-8" | tee -a /etc/locale.gen && locale-gen fr_FR.UTF-8 + COPY app/ ./ RUN pip install -r requirements.txt From 0581672fad662c5c1ad86b9514615bb6386b28c9 Mon Sep 17 00:00:00 2001 From: Kyllian Beguin <50613619+KyllianBeguin@users.noreply.github.com> Date: Sun, 17 Nov 2024 09:31:57 +0100 Subject: [PATCH 147/147] =?UTF-8?q?[kb]=20=F0=9F=93=84=20Update=20README?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 95 ++++++++++++++++++++++++------------------------------- 1 file changed, 42 insertions(+), 53 deletions(-) diff --git a/README.md b/README.md index 3a03607..47b396c 100644 --- a/README.md +++ b/README.md @@ -1,53 +1,42 @@ -Template DataForGood -================ - - - -This file will become your README and also the index of your -documentation. 
- -# Contributing - - -## Use a venv - - python3 -m venv name-of-your-venv - - source name-of-your-venv/bin/activate - - -## Utiliser Poetry - -[Installer Poetry](https://python-poetry.org/docs/): - - python3 -m pip install "poetry==1.4.0" - -Installer les dépendances: - - poetry install - -Ajouter une dépendance: - - poetry add pandas - -Mettre à jour les dépendances: - - poetry update - -## Utiliser Jupyter Notebook - - jupyter notebook - -and check your browser ! - -## Lancer les precommit-hook localement - -[Installer les precommit](https://pre-commit.com/) - - - pre-commit run --all-files - - -## Utiliser Tox pour tester votre code - - tox -vv \ No newline at end of file +``` + _____ YAao, +|__ /___ _ __ ___ Y8888b, + / // _ \ '__/ _ \ ,oA8888888b, + / /| __/ | | (_) | ,aaad8888888888888888bo, +/____\___|_| \___/ _ _ ,d888888888888888888888888888b, + | _ \ ___ ___| |__ ___| |_ ,888888888888888888888888888888888b, + | | | |/ _ \/ __| '_ \ / _ \ __| d8888888888888888888888888888888888888, + | |_| | __/ (__| | | | __/ |_ d888888888888888888888888888888888888888b + |____/_\___|\___|_| |_|\___|\__| d888888P' `Y888888888888, + / ___| __ _ _ ___ ____ _ __ _ ___ 88888P' Ybaaaa8888888888l + \___ \ / _` | | | \ \ / / _` |/ _` |/ _ \ a8888' `Y8888P' `V888888 + ___) | (_| | |_| |\ V / (_| | (_| | __/ d8888888a `Y8888 + |____/_\___|\__,_| \_/ \__,_|\__, |\___| AY/'' `\Y8b ``Y8b + |___/ Y' `YP ~~ +``` + +# Zéro Déchet Sauvage +## À propos +[Zéro Déchet Sauvage](http://zds-app.duckdns.org/) (ZDS) est une application de visualisation de données de déchets diffus et collectés au cours d'évènements de collectes. Il s'agit d'une plateforme appartenant à l'association [MerTerre](https://mer-terre.org/) et développée par des bénévoles de l'association Data For Good. + +## Démo +Une démo de l'application a été réalisée lors du Demo Day organisé à la fin de la saison 12 par Data For Good.
+ + video youtube du demo day + + +## Remerciements +Merci à l'équipe de développement : +* [Floriane](https://github.com/florianeduccini) +* [Hadrien](https://github.com/DridrM) +* [Mehdi](https://github.com/Mendi33) +* [Vincent](https://github.com/Vincentdata) +* [Thibault](https://github.com/tgazagnes) +* [Linh](https://github.com/linh-dinh-1012) +* [Joaquim](https://github.com/JoaquimDiaz) +* [Valérie](https://github.com/ValerieNevo) +* [Kyllian](https://github.com/KyllianBeguin) + +## Licence +Ce projet est sous licence MIT. Plus d'informations dans le fichier [LICENCE](https://github.com/dataforgoodfr/12_zero_dechet_sauvage/blob/staging/LICENSE).